From 850e44f7c6376e5b93e9b3657115751427aa1540 Mon Sep 17 00:00:00 2001 From: ghidorahrex Date: Thu, 27 Oct 2022 14:55:20 -0400 Subject: [PATCH] GP-2718: Resolved several open ARM instruction conflicts --- .../Processors/ARM/data/languages/ARM.ldefs | 38 +- .../ARM/data/languages/ARMinstructions.sinc | 820 ++++++------- .../ARM/data/languages/ARMneon.sinc | 1061 +++++++++-------- .../Processors/ARM/data/languages/ARMv8.sinc | 151 ++- 4 files changed, 1064 insertions(+), 1006 deletions(-) diff --git a/Ghidra/Processors/ARM/data/languages/ARM.ldefs b/Ghidra/Processors/ARM/data/languages/ARM.ldefs index d327f8806c..1f56a7227f 100644 --- a/Ghidra/Processors/ARM/data/languages/ARM.ldefs +++ b/Ghidra/Processors/ARM/data/languages/ARM.ldefs @@ -5,7 +5,7 @@ endian="little" size="32" variant="v8" - version="1.104" + version="1.105" slafile="ARM8_le.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -29,7 +29,7 @@ endian="little" size="32" variant="v8T" - version="1.104" + version="1.105" slafile="ARM8_le.sla" processorspec="ARMtTHUMB.pspec" manualindexfile="../manuals/ARM.idx" @@ -49,7 +49,7 @@ instructionEndian="little" size="32" variant="v8LEInstruction" - version="1.104" + version="1.105" slafile="ARM8_le.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -64,7 +64,7 @@ endian="big" size="32" variant="v8" - version="1.104" + version="1.105" slafile="ARM8_be.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -87,7 +87,7 @@ endian="big" size="32" variant="v8T" - version="1.104" + version="1.105" slafile="ARM8_be.sla" processorspec="ARMtTHUMB.pspec" manualindexfile="../manuals/ARM.idx" @@ -104,7 +104,7 @@ endian="little" size="32" variant="v7" - version="1.104" + version="1.105" slafile="ARM7_le.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -124,7 +124,7 @@ instructionEndian="little" size="32" variant="v7LEInstruction" - version="1.104" + version="1.105" slafile="ARM7_le.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -139,7 +139,7 @@ endian="big" size="32" variant="v7" - version="1.104" + version="1.105" slafile="ARM7_be.sla" processorspec="ARMt.pspec" manualindexfile="../manuals/ARM.idx" @@ -157,7 +157,7 @@ endian="little" size="32" variant="Cortex" - version="1.104" + version="1.105" slafile="ARM7_le.sla" processorspec="ARMCortex.pspec" manualindexfile="../manuals/ARM.idx" @@ -177,7 +177,7 @@ endian="big" size="32" variant="Cortex" - version="1.104" + version="1.105" slafile="ARM7_be.sla" processorspec="ARMCortex.pspec" manualindexfile="../manuals/ARM.idx" @@ -196,7 +196,7 @@ endian="little" size="32" variant="v6" - version="1.104" + version="1.105" slafile="ARM6_le.sla" processorspec="ARMt_v6.pspec" manualindexfile="../manuals/ARM.idx" @@ -216,7 +216,7 @@ endian="big" size="32" variant="v6" - version="1.104" + version="1.105" slafile="ARM6_be.sla" processorspec="ARMt_v6.pspec" manualindexfile="../manuals/ARM.idx" @@ -236,7 +236,7 @@ endian="little" size="32" variant="v5t" - version="1.104" + version="1.105" slafile="ARM5t_le.sla" processorspec="ARMt_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -254,7 +254,7 @@ endian="big" size="32" variant="v5t" - version="1.104" + version="1.105" slafile="ARM5t_be.sla" processorspec="ARMt_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -272,7 +272,7 @@ endian="little" size="32" variant="v5" - version="1.104" + version="1.105" slafile="ARM5_le.sla" processorspec="ARM_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -304,7 +304,7 @@ 
endian="little" size="32" variant="v4t" - version="1.104" + version="1.105" slafile="ARM4t_le.sla" processorspec="ARMt_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -321,7 +321,7 @@ endian="big" size="32" variant="v4t" - version="1.104" + version="1.105" slafile="ARM4t_be.sla" processorspec="ARMt_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -338,7 +338,7 @@ endian="little" size="32" variant="v4" - version="1.104" + version="1.105" slafile="ARM4_le.sla" processorspec="ARM_v45.pspec" manualindexfile="../manuals/ARM.idx" @@ -358,7 +358,7 @@ endian="big" size="32" variant="v4" - version="1.104" + version="1.105" slafile="ARM4_be.sla" processorspec="ARM_v45.pspec" manualindexfile="../manuals/ARM.idx" diff --git a/Ghidra/Processors/ARM/data/languages/ARMinstructions.sinc b/Ghidra/Processors/ARM/data/languages/ARMinstructions.sinc index 166ca66cee..c418370f9c 100644 --- a/Ghidra/Processors/ARM/data/languages/ARMinstructions.sinc +++ b/Ghidra/Processors/ARM/data/languages/ARMinstructions.sinc @@ -1669,11 +1669,11 @@ SetMode: "#"^23 is c0004=0x17 { setAbortMode(); } SetMode: "#"^27 is c0004=0x1b { setUndefinedMode(); } SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } -:cps SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=0 & c1717=1 & c0916=0 & c0508=0 & SetMode { } -:cpsie IFLAGS is $(AMODE) & cond=15 & c2027=16 & c1819=2 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } -:cpsid IFLAGS is $(AMODE) & cond=15 & c2027=16 & c1819=3 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } -:cpsie IFLAGS, SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=2 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } -:cpsid IFLAGS, SetMode is $(AMODE) & cond=15 & c2027=16 & c1819=3 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } +:cps SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=0 & c1717=1 & c0916=0 & c0508=0 & SetMode { } +:cpsie IFLAGS is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=2 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } +:cpsid IFLAGS is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=3 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } +:cpsie IFLAGS, SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=2 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } +:cpsid IFLAGS, SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=3 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } @endif # VERSION_6 @@ -1709,7 +1709,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } @if defined(VERSION_6) -:rfeia rn is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +:rfeia rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn; @@ -1719,7 +1719,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfeib rn is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +:rfeib rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn + 4; @@ -1729,7 +1729,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfeda rn is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +:rfeda rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, 
cpsr ptr:4 = rn; @@ -1739,7 +1739,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfedb rn is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 +:rfedb rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn - 4; @@ -1749,7 +1749,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfeia Rn! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +:rfeia Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn; @@ -1760,7 +1760,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfeib Rn! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +:rfeib Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn + 4; @@ -1771,7 +1771,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfeda Rn! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +:rfeda Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn; @@ -1782,7 +1782,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:rfedb Rn! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 +:rfedb Rn! 
is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn - 4; @@ -1793,7 +1793,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } return [pc]; } -:srsia SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsia SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; @@ -1803,7 +1803,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } ptr = ptr + 4; } -:srsib SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsib SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp + 4; @@ -1812,7 +1812,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } *ptr = spsr; } -:srsda SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsda SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; @@ -1822,7 +1822,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } ptr = ptr - 4; } -:srsdb SRSMode is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsdb SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp - 4; @@ -1831,7 +1831,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } *ptr = spsr; } -:srsia SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsia SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; @@ -1842,7 +1842,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } sp = ptr; } -:srsib SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=1 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsib SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp + 4; @@ -1852,7 +1852,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } sp = ptr; } -:srsda SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=0 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsda SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; @@ -1863,7 +1863,7 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } sp = ptr; } -:srsdb SRSMode! is $(AMODE) & cond=15 & c2527=4 & P24=1 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode +:srsdb SRSMode! 
is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; @@ -1878,13 +1878,13 @@ SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } @if defined(VERSION_5) -:stc2 cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 +:stc2 cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 { t_cpn:4 = cpn; coprocessor_store2(t_cpn,CRd,addrmode5); } -:stc2l cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 +:stc2l cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 { t_cpn:4 = cpn; coprocessor_storelong2(t_cpn,CRd,addrmode5); @@ -1915,7 +1915,7 @@ macro sub_with_carry_flags(op1, op2){ } -:adc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:adc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -1926,7 +1926,7 @@ macro sub_with_carry_flags(op1, op2){ build SBIT_CZNO; } -:adc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:adc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -1937,7 +1937,7 @@ macro sub_with_carry_flags(op1, op2){ build SBIT_CZNO; } -:adc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:adc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -1948,7 +1948,7 @@ macro sub_with_carry_flags(op1, op2){ build SBIT_CZNO; } -:adc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:adc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -1961,7 +1961,7 @@ macro sub_with_carry_flags(op1, op2){ goto [pc]; } -:adc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:adc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -1974,7 +1974,7 @@ macro sub_with_carry_flags(op1, op2){ goto [pc]; } -:adc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:adc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -2003,13 +2003,13 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # ADR constructors must appear before ADD constructors to give ADR parsing precedence # -:adr^COND Rd,ArmPCRelImmed12 is $(AMODE) & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd & ArmPCRelImmed12 +:adr^COND Rd,ArmPCRelImmed12 is $(AMODE) & ARMcond=1 & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd & ArmPCRelImmed12 { build COND; Rd = ArmPCRelImmed12; } -:adr^COND pc,ArmPCRelImmed12 is $(AMODE) & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd=15 & pc & ArmPCRelImmed12 +:adr^COND pc,ArmPCRelImmed12 is $(AMODE) & ARMcond=1 & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd=15 & pc & ArmPCRelImmed12 { build COND; dest:4 = ArmPCRelImmed12; @@ -2018,7 +2018,7 @@ ArmPCRelImmed12: reloff is U23=0 & 
immed & rotate } -:add^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:add^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -2029,7 +2029,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:add^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:add^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -2040,7 +2040,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:add^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:add^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -2051,7 +2051,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:add^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:add^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -2064,7 +2064,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:add^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:add^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -2077,7 +2077,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:add^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:add^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -2090,7 +2090,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:and^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:and^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -2101,7 +2101,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:and^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:and^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -2112,7 +2112,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:and^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:and^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -2123,7 +2123,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:and^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:and^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -2136,7 +2136,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:and^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:and^COND^SBIT_CZNO pc,rn,shift2 
is $(AMODE) & pc & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -2149,7 +2149,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:and^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:and^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -2178,7 +2178,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:bfc^COND Rd,lsbImm,bitWidth is $(AMODE) & COND & c2127=0x3e & msbImm & Rd & lsbImm & bitWidth & c0006=0x1f { +:bfc^COND Rd,lsbImm,bitWidth is $(AMODE) & ARMcond=1 & COND & c2127=0x3e & msbImm & Rd & lsbImm & bitWidth & c0006=0x1f { build COND; build lsbImm; build msbImm; @@ -2187,7 +2187,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate Rd = Rd & clearMask; } -:bfi^COND Rd,Rm,lsbImm,bitWidth is $(AMODE) & COND & c2127=0x3e & Rd & Rm & lsbImm & bitWidth & c0406=1 { +:bfi^COND Rd,Rm,lsbImm,bitWidth is $(AMODE) & ARMcond=1 & COND & c2127=0x3e & Rd & Rm & lsbImm & bitWidth & c0406=1 { build COND; build lsbImm; build bitWidth; @@ -2199,7 +2199,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_6T2 -:bic^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:bic^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -2210,7 +2210,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:bic^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:bic^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -2221,7 +2221,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:bic^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:bic^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -2232,7 +2232,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:bic^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:bic^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -2245,7 +2245,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:bic^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:bic^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -2258,7 +2258,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:bic^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:bic^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -2279,7 +2279,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # bl used as a PIC instruction to get at current PC in lr -:bl^COND Addr24 is $(AMODE) & COND & c2527=5 & L24=1 & immed24=0xffffff & Addr24 +:bl^COND Addr24 is $(AMODE) & ARMcond=1 & COND & c2527=5 & L24=1 & immed24=0xffffff & 
Addr24 { build COND; build Addr24; @@ -2313,7 +2313,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(T_VARIANT) && defined(VERSION_5) # Two forms of blx needed to distinguish from b -:blx HAddr24 is $(AMODE) & CALLoverride=0 & cond=15 & c2527=5 & H24=0 & HAddr24 +:blx HAddr24 is $(AMODE) & CALLoverride=0 & ARMcond=0 & cond=15 & c2527=5 & H24=0 & HAddr24 { lr = inst_next; SetThumbMode(1); @@ -2321,7 +2321,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # don't do causes decompiler trouble TB = 0; } # Always changes to THUMB mode -:blx HAddr24 is $(AMODE) & CALLoverride=1 & cond=15 & c2527=5 & H24=0 & HAddr24 +:blx HAddr24 is $(AMODE) & CALLoverride=1 & ARMcond=0 & cond=15 & c2527=5 & H24=0 & HAddr24 { lr = inst_next; SetThumbMode(1); @@ -2329,7 +2329,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # Always changes to THUMB mode -:blx HAddr24 is $(AMODE) & CALLoverride=0 & cond=15 & c2527=5 & H24=1 & HAddr24 +:blx HAddr24 is $(AMODE) & ARMcond=0 & CALLoverride=0 & cond=15 & c2527=5 & H24=1 & HAddr24 { lr = inst_next; SetThumbMode(1); @@ -2337,7 +2337,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # don't do causes decompiler trouble TB = 0; } # Always changes to THUMB mode -:blx HAddr24 is $(AMODE) & CALLoverride=1 & cond=15 & c2527=5 & H24=1 & HAddr24 +:blx HAddr24 is $(AMODE) & ARMcond=0 & CALLoverride=1 & cond=15 & c2527=5 & H24=1 & HAddr24 { lr = inst_next; SetThumbMode(1); @@ -2348,7 +2348,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5) -:blx^COND rm is $(AMODE) & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm +:blx^COND rm is $(AMODE) & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm { build COND; build rm; @@ -2358,7 +2358,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # don't do causes decompiler trouble TB = 0; } # Optional THUMB -:blx^COND rm is $(AMODE) & CALLoverride=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm +:blx^COND rm is $(AMODE) & CALLoverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm { build COND; build rm; @@ -2372,7 +2372,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5_or_T) # if branching using lr, assume return -:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm=14 +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm=14 { build COND; build rm; @@ -2380,7 +2380,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate return [pc]; } # Optional change to THUMB -:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm { build COND; build rm; @@ -2389,7 +2389,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # Optional change to THUMB # if lr has just been set, assume call -:bx^COND rm is $(AMODE) & REToverride=0 & LRset=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm +:bx^COND rm is $(AMODE) & REToverride=0 & LRset=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm { build COND; build rm; @@ -2397,7 +2397,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate call [pc]; } # Optional change to THUMB -:bx^COND rm is $(AMODE) & REToverride=1 & COND & 
c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm +:bx^COND rm is $(AMODE) & REToverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm { build COND; build rm; @@ -2405,7 +2405,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } # Optional change to THUMB -#:bx^COND lr is $(AMODE) & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & Rm=14 & lr +#:bx^COND lr is $(AMODE) & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & Rm=14 & lr #{ # build COND; # TB=(lr&0x00000001)!=0; @@ -2419,7 +2419,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # bxj behaves like bx except that Jazelle state is enabled if available (added with Version-5 J-variant) -:bxj^COND rm is $(AMODE) & REToverride=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm +:bxj^COND rm is $(AMODE) & REToverride=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm { build COND; build rm; @@ -2431,7 +2431,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # Optional change to THUMB # if branching using "ip" then is a goto -:bxj^COND rm is $(AMODE) & REToverride=0 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm & Rm=12 +:bxj^COND rm is $(AMODE) & REToverride=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm & Rm=12 { build COND; build rm; @@ -2442,7 +2442,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # Optional change to THUMB -:bxj^COND rm is $(AMODE) & REToverride=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm +:bxj^COND rm is $(AMODE) & REToverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm { build COND; build rm; @@ -2457,7 +2457,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5) -:cdp2 cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & cond=15 & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm +:cdp2 cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm { t_cpn:4 = cpn; t_op1:4 = opcode1; @@ -2467,7 +2467,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_5 -:cdp^COND cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & COND & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm +:cdp^COND cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm { build COND; t_cpn:4 = cpn; @@ -2486,7 +2486,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5) -:clz^COND Rd,rm is $(AMODE) & COND & c2027=22 & c1619=15 & Rd & c0811=15 & c0407=1 & rm +:clz^COND Rd,rm is $(AMODE) & ARMcond=1 & COND & c2027=22 & c1619=15 & Rd & c0811=15 & c0407=1 & rm { build COND; build rm; @@ -2495,7 +2495,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_5 -:cmn^COND rn,shift1 is $(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift1 +:cmn^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; @@ -2506,7 +2506,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate affectflags(); } -:cmn^COND rn,shift2 is $(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift2 +:cmn^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; @@ -2517,7 +2517,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate affectflags(); } -:cmn^COND rn,shift3 is 
$(AMODE) & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift3 +:cmn^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; @@ -2528,7 +2528,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate affectflags(); } -:cmp^COND rn,shift1 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift1 +:cmp^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; @@ -2539,7 +2539,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate affectflags(); } -:cmp^COND rn,shift2 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift2 +:cmp^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; @@ -2550,7 +2550,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate affectflags(); } -:cmp^COND rn,shift3 is $(AMODE) & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift3 +:cmp^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; @@ -2564,7 +2564,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) # cpy is a pre-UAL synonym for mov -:cpy^COND pc,rm is $(AMODE) & COND & pc & c2027=0x1a & c1619=0 & c0411=0 & Rd=15 & rm +:cpy^COND pc,rm is $(AMODE) & ARMcond=1 & COND & pc & c2027=0x1a & c1619=0 & c0411=0 & Rd=15 & rm { build COND; build rm; @@ -2572,14 +2572,14 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:cpy^COND lr,rm is $(AMODE) & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd=14 & lr & rm & Rm2=15 +:cpy^COND lr,rm is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd=14 & lr & rm & Rm2=15 [ LRset=1; globalset(inst_next,LRset); ] { build COND; lr = rm; } -:cpy^COND Rd,rm is $(AMODE) & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd & rm +:cpy^COND Rd,rm is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd & rm { build COND; build rm; @@ -2590,7 +2590,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6K) || defined(VERSION_6T2) -:dbg^COND optionImm is $(AMODE) & COND & c0427=0x320f0f & optionImm { +:dbg^COND optionImm is $(AMODE) & ARMcond=1 & COND & c0427=0x320f0f & optionImm { @if defined(VERSION_7) build COND; build optionImm; @@ -2614,7 +2614,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_7 -:eor^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:eor^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -2625,7 +2625,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:eor^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:eor^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -2636,7 +2636,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:eor^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:eor^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -2647,7 +2647,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:eor^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:eor^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 
& COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -2660,7 +2660,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:eor^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:eor^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -2673,7 +2673,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:eor^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:eor^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -2698,13 +2698,13 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate ### These must come first, because of cond=15 match @if defined(VERSION_5) -:ldc2 cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 +:ldc2 cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 { t_cpn:4 = cpn; coprocessor_load2(t_cpn,CRd,addrmode5); } -:ldc2l cpn,CRd,addrmode5 is $(AMODE) & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 +:ldc2l cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 { t_cpn:4 = cpn; coprocessor_loadlong2(t_cpn,CRd,addrmode5); @@ -2713,7 +2713,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_5 ######## cond=15 match -:ldc^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 +:ldc^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 { build COND; build addrmode5; @@ -2721,7 +2721,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate coprocessor_load(t_cpn,CRd,addrmode5); } -:ldcl^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 +:ldcl^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 { build COND; build addrmode5; @@ -2729,13 +2729,13 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate coprocessor_loadlong(t_cpn,CRd,addrmode5); } -:ldm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=1 & c1515=0 & reglist +:ldm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=1 & c1515=0 & reglist { build COND; build reglist; } -:ldm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=1 & c1515=1 & reglist +:ldm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=1 & c1515=1 & reglist { build COND; build reglist; @@ -2743,7 +2743,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate return [pc]; } -#:ldr^COND Rd,addrmode2 is $(AMODE) & COND & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +#:ldr^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 #{ # build COND; # build addrmode2; @@ -2754,7 +2754,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate #} # The following form of ldr assumes alignment checking is on -:ldr^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldr^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -2762,7 +2762,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } 
# Two forms of ldr with destination=pc needed to distinguish from ldrt -:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 [ LRset=0; globalset(inst_next,LRset); ] { build COND; @@ -2774,7 +2774,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate SetThumbMode(0); } # No unaligned address -:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 [ LRset=0; globalset(inst_next,LRset); ] { build COND; @@ -2787,7 +2787,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # No unaligned address # Two forms of ldr with destination=pc needed to distinguish from ldrt -:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -2796,7 +2796,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } # No unaligned address -:ldr^COND pc,addrmode2 is $(AMODE) & pc & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -2805,14 +2805,14 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } # No unaligned address -:ldrb^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldrb^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; Rd = zext( *:1 addrmode2); } -:ldrbt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldrbt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -2821,7 +2821,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5E) -:ldrd^COND Rd,Rd2,addrmode3 is $(AMODE) & COND & c2527=0 & c0407=13 & c1212=0 & L20=0 & Rd & Rd2 & addrmode3 +:ldrd^COND Rd,Rd2,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & c0407=13 & c1212=0 & L20=0 & Rd & Rd2 & addrmode3 { build COND; build addrmode3; @@ -2833,7 +2833,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) -:ldrex^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x19 & Rn & Rd & c0011=0xf9f +:ldrex^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xf9f { build COND; Rd = *Rn; @@ -2843,20 +2843,20 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6K) -:ldrexb^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x1d & Rn & Rd & c0011=0xf9f +:ldrexb^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xf9f { build COND; Rd = zext(*:1 Rn); } -:ldrexd^COND Rd,Rd2,[Rn] is $(AMODE) & COND & c2027=0x1b & Rn & Rd & Rd2 
& c0011=0xf9f +:ldrexd^COND Rd,Rd2,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xf9f { build COND; Rd = *(Rn); Rd2 = *(Rn + 4); } -:ldrexh^COND Rd,[Rn] is $(AMODE) & COND & c2027=0x1f & Rn & Rd & c0011=0xf9f +:ldrexh^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xf9f { build COND; Rd = zext(*:2 Rn); @@ -2864,7 +2864,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_6K -:ldrh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=11 & Rd & addrmode3 +:ldrh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; @@ -2873,7 +2873,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:ldrht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=11 & Rd & addrmode3 { +:ldrht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; Rd = zext( *:2 addrmode3); @@ -2881,7 +2881,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_6T2 -:ldrsb^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=13 & Rd & addrmode3 +:ldrsb^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=13 & Rd & addrmode3 { build COND; build addrmode3; @@ -2890,7 +2890,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:ldrsbt^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=13 & Rd & addrmode3 { +:ldrsbt^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=13 & Rd & addrmode3 { build COND; build addrmode3; Rd = sext( *:1 addrmode3); @@ -2898,7 +2898,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_6T2 -:ldrsh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=1 & c0407=15 & Rd & addrmode3 +:ldrsh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=15 & Rd & addrmode3 { build COND; build addrmode3; @@ -2907,7 +2907,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:ldrsht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=15 & Rd & addrmode3 { +:ldrsht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=15 & Rd & addrmode3 { build COND; build addrmode3; Rd = sext( *:2 addrmode3); @@ -2916,7 +2916,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_6T2 # The following form of ldr assumes alignment checking is on -:ldrt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:ldrt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -2926,7 +2926,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate ###### must come first cond=15 @if defined(VERSION_5) -:mcr2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & cond=15 & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +:mcr2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { t_cpn:4 = cpn; t_op1:4 = opc1; @@ -2940,7 +2940,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # ===== START mcr :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND 
& + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2949,7 +2949,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2958,7 +2958,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2967,7 +2967,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2976,7 +2976,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2985,7 +2985,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -2994,7 +2994,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3003,7 +3003,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3012,7 +3012,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3021,7 +3021,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3030,7 +3030,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3039,7 +3039,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND 
mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3048,7 +3048,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3057,7 +3057,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3066,7 +3066,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3075,7 +3075,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3084,7 +3084,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3093,7 +3093,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3102,7 +3102,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3111,7 +3111,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3120,7 +3120,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3129,7 +3129,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & 
mcrOperands { build COND; @@ -3138,7 +3138,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3147,7 +3147,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3156,7 +3156,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3165,7 +3165,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3174,7 +3174,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3183,7 +3183,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3192,7 +3192,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3201,7 +3201,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3210,7 +3210,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3219,7 +3219,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3228,7 +3228,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & 
c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3237,7 +3237,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3246,7 +3246,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3255,7 +3255,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3264,7 +3264,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3273,7 +3273,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3282,7 +3282,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3291,7 +3291,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3300,7 +3300,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3309,7 +3309,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3319,7 +3319,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3329,7 +3329,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & 
rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=2 & c2427=14 & COND & + $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=2 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3339,7 +3339,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3349,7 +3349,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3359,7 +3359,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3369,7 +3369,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3379,7 +3379,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3389,7 +3389,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3399,7 +3399,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mcr^COND mcrOperands is - $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1 & c2427=14 & COND & + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3410,7 +3410,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # ===== END mcr -:mcr^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & COND & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +:mcr^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { build COND; t_cpn:4 = cpn; @@ -3421,14 +3421,14 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate ##### must come first cond=15 @if defined(VERSION_6) -:mcrr2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & cond=15 & c2027=0xc4 & cpn & opcode3 & Rd & Rn & CRm +:mcrr2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & ARMcond=0 & cond=15 & c2027=0xc4 & cpn & opcode3 & Rd & Rn & CRm { t_cpn:4 = cpn; t_op:4 = opcode3; coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); } -:mrrc2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & cond=15 & c2027=0xc5 & cpn & opcode3 & Rd & Rn & CRm +:mrrc2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & ARMcond=0 & cond=15 & c2027=0xc5 & cpn & opcode3 & Rd & Rn & CRm { 
t_cpn:4 = cpn; t_op:4 = opcode3; @@ -3441,7 +3441,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5E) -:mcrr^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc4 & COND & cpn & opcode3 & Rd & Rn & CRm +:mcrr^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc4 & COND & ARMcond=1 & cpn & opcode3 & Rd & Rn & CRm { build COND; t_cpn:4 = cpn; @@ -3449,7 +3449,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); } -:mrrc^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc5 & COND & cpn & opcode3 & Rd & Rn & CRm +:mrrc^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc5 & COND & ARMcond=1 & cpn & opcode3 & Rd & Rn & CRm { build COND; t_cpn:4 = cpn; @@ -3460,7 +3460,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @endif # VERSION_5E -:mla^COND^SBIT_ZN Rn,Rm,Rs,Rd is $(AMODE) & COND & c2527=0 & c2124=1 & SBIT_ZN & Rn & Rd & Rs & c0407=9 & Rm +:mla^COND^SBIT_ZN Rn,Rm,Rs,Rd is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=1 & SBIT_ZN & Rn & Rd & Rs & c0407=9 & Rm { build COND; Rn = Rm*Rs + Rd; @@ -3470,14 +3470,14 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:mls^COND Rn,Rm,Rs,Rd is $(AMODE) & COND & c2027=0x06 & Rn & Rd & Rs & c0407=9 & Rm { +:mls^COND Rn,Rm,Rs,Rd is $(AMODE) & ARMcond=1 & COND & c2027=0x06 & Rn & Rd & Rs & c0407=9 & Rm { build COND; Rn = Rd - Rm*Rs; } @endif # VERSION_6T2 -:mov^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 +:mov^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 { build COND; build shift1; @@ -3487,7 +3487,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mov^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 +:mov^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 { build COND; build shift2; @@ -3497,7 +3497,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mov lr,pc is $(AMODE) & c0031=0xe1a0e00f & lr & pc +:mov lr,pc is $(AMODE) & ARMcond=1 & c0031=0xe1a0e00f & lr & pc [ LRset=1; globalset(inst_next,LRset); ] { lr = inst_next + 4; @@ -3505,7 +3505,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate logicflags(); } -:mov^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 +:mov^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 { build COND; build shift3; @@ -3515,7 +3515,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mov^COND^SBIT_CZNO pc,shift1 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1 +:mov^COND^SBIT_CZNO pc,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1 { build COND; build shift1; @@ -3528,7 +3528,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 +:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; @@ -3540,7 +3540,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate ALUWritePC(tmp); goto [pc]; } -:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & LRset=1 & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & 
c2627=0 & shift2 +:mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & LRset=1 & pc & COND & ARMcond=1 & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; @@ -3553,7 +3553,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate call [pc]; } -:mov^COND^SBIT_CZNO pc,shift3 is $(AMODE) & pc & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3 +:mov^COND^SBIT_CZNO pc,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3 { build COND; build shift3; @@ -3566,13 +3566,13 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:mov lr,rm is $(AMODE) & cond=15 & c2527=0 & S20=0 & c2124=13 & c1619=0 & rm & Rm2=15 & sftimm=0 & c0406=0 & Rd=14 & lr +:mov lr,rm is $(AMODE) & ARMcond=0 & cond=15 & c2527=0 & S20=0 & c2124=13 & c1619=0 & rm & Rm2=15 & sftimm=0 & c0406=0 & Rd=14 & lr [ LRset=1; globalset(inst_next,LRset); ] { lr = rm; } -:mov^COND pc,lr is $(AMODE) & pc & COND & c2527=0 & S20=0 & c2124=13 & c1619=0 & Rd=15 & sftimm=0 & c0406=0 & Rm=14 & lr +:mov^COND pc,lr is $(AMODE) & pc & ARMcond=1 & COND & c2527=0 & S20=0 & c2124=13 & c1619=0 & Rd=15 & sftimm=0 & c0406=0 & Rm=14 & lr { build COND; dest:4 = lr; @@ -3582,12 +3582,12 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6T2) -:movw^COND Rd,"#"^val is $(AMODE) & COND & c2027=0x30 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { +:movw^COND Rd,"#"^val is $(AMODE) & ARMcond=1 & COND & c2027=0x30 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { build COND; Rd = val; } -:movt^COND Rd,"#"^val is $(AMODE) & COND & c2027=0x34 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { +:movt^COND Rd,"#"^val is $(AMODE) & ARMcond=1 & COND & c2027=0x34 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { build COND; Rd = (val << 16) | (Rd & 0xffff); } @@ -3598,7 +3598,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5) -:mrc2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & cond=15 & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +:mrc2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { t_cpn:4 = cpn; t_op1:4 = opc1; @@ -3611,7 +3611,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3621,7 +3621,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3631,7 +3631,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3641,7 +3641,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & 
mcrOperands { build COND; @@ -3651,7 +3651,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3661,7 +3661,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3671,7 +3671,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3681,7 +3681,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3691,7 +3691,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3701,7 +3701,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3711,7 +3711,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3721,7 +3721,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3731,7 +3731,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3741,7 +3741,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3751,7 +3751,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & + 
$(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3761,7 +3761,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3771,7 +3771,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3781,7 +3781,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3791,7 +3791,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3801,7 +3801,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3811,7 +3811,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3821,7 +3821,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3831,7 +3831,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3841,7 +3841,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3851,7 +3851,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3861,7 +3861,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND 
mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3871,7 +3871,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3881,7 +3881,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3891,7 +3891,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3901,7 +3901,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3911,7 +3911,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3921,7 +3921,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3931,7 +3931,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3941,7 +3941,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3951,7 +3951,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3961,7 +3961,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & 
COND & ARMcond=1 & mcrOperands { build COND; @@ -3971,7 +3971,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3981,7 +3981,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -3991,7 +3991,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4001,7 +4001,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4011,7 +4011,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4021,7 +4021,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4032,7 +4032,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4043,7 +4043,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=2 & c2427=14 & COND & + $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=2 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4054,7 +4054,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4065,7 +4065,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4076,7 +4076,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & 
COND & + $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4087,7 +4087,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4098,7 +4098,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4109,7 +4109,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4120,7 +4120,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate :mrc^COND mcrOperands is - $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1 & c2427=14 & COND & + $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; @@ -4132,7 +4132,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate # ===== End mrc -:mrc^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & COND & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm +:mrc^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { build COND; t_cpn:4 = cpn; @@ -4142,20 +4142,20 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } -:mrs^COND Rd,cpsr is $(AMODE) & COND & c2027=16 & c1619=15 & Rd & offset_12=0 & cpsr +:mrs^COND Rd,cpsr is $(AMODE) & ARMcond=1 & COND & c2027=16 & c1619=15 & Rd & offset_12=0 & cpsr { # TODO: GE bits have not been included build COND; Rd = zext( (NG<<4) | (ZR<<3) | (CY<<2) | (OV<<1) | (Q) ) << 27; } -:mrs^COND Rd,spsr is $(AMODE) & COND & c2027=20 & c1619=15 & Rd & offset_12=0 & spsr +:mrs^COND Rd,spsr is $(AMODE) & ARMcond=1 & COND & c2027=20 & c1619=15 & Rd & offset_12=0 & spsr { build COND; Rd = spsr; } -:msr^COND cpsrmask,shift1 is $(AMODE) & COND & c2027=50 & cpsrmask & c1215=15 & c2627=0 & shift1 +:msr^COND cpsrmask,shift1 is $(AMODE) & ARMcond=1 & COND & c2027=50 & cpsrmask & c1215=15 & c2627=0 & shift1 { build COND; build cpsrmask; @@ -4163,7 +4163,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate cpsr = (cpsr& ~cpsrmask) | (shift1 & cpsrmask); } -:msr^COND cpsrmask,rm is $(AMODE) & COND & c2027=18 & cpsrmask & c1215=15 & c0811=0 & c0407=0 & rm +:msr^COND cpsrmask,rm is $(AMODE) & ARMcond=1 & COND & c2027=18 & cpsrmask & c1215=15 & c0811=0 & c0407=0 & rm { # TODO: GE bits have not been included build COND; @@ -4177,7 +4177,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate NG = ((tmp >> 4) & 0x1) != 0; } -:msr^COND spsrmask,shift1 is $(AMODE) & COND & c2027=54 & spsrmask & c1215=15 & c2627=0 & shift1 +:msr^COND spsrmask,shift1 is $(AMODE) & ARMcond=1 & COND & c2027=54 & spsrmask & c1215=15 & c2627=0 & shift1 { build COND; build spsrmask; @@ -4185,14 +4185,14 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate spsr = (spsr& ~spsrmask) | (shift1 & spsrmask); } -:msr^COND spsrmask,rm is $(AMODE) & COND & 
c2027=22 & spsrmask & c1215=15 & c0811=0 & c0407=0 & rm +:msr^COND spsrmask,rm is $(AMODE) & ARMcond=1 & COND & c2027=22 & spsrmask & c1215=15 & c0811=0 & c0407=0 & rm { build COND; build spsrmask; spsr = (spsr& ~spsrmask) | (rm & spsrmask); } -:mul^COND^SBIT_ZN rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=0 & SBIT_ZN & rn & c1215=0 & rs & c0407=9 & rm +:mul^COND^SBIT_ZN rn,rm,rs is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=0 & SBIT_ZN & rn & c1215=0 & rs & c0407=9 & rm { build COND; build rm; @@ -4202,7 +4202,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_ZN; } -:mvn^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 +:mvn^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 { build COND; build shift1; @@ -4212,7 +4212,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mvn^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 +:mvn^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 { build COND; build shift2; @@ -4222,7 +4222,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mvn^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 +:mvn^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 { build COND; build shift3; @@ -4232,7 +4232,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:mvn^COND^SBIT_ZN pc,shift1 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift1 +:mvn^COND^SBIT_ZN pc,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift1 { build COND; build shift1; @@ -4243,7 +4243,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:mvn^COND^SBIT_ZN pc,shift2 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift2 +:mvn^COND^SBIT_ZN pc,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; @@ -4254,7 +4254,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:mvn^COND^SBIT_ZN pc,shift3 is $(AMODE) & pc & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift3 +:mvn^COND^SBIT_ZN pc,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift3 { build COND; build shift3; @@ -4267,12 +4267,12 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6K) || defined(VERSION_6T2) || defined(VERSION_7) -:nop^COND is $(AMODE) & COND & c0027=0x320f000 { +:nop^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f000 { } @endif # VERSION_6K -:orr^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:orr^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -4283,7 +4283,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:orr^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:orr^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -4294,7 +4294,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } 
-:orr^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:orr^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -4305,7 +4305,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate build SBIT_CZNO; } -:orr^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:orr^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -4318,7 +4318,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:orr^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:orr^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -4331,7 +4331,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate goto [pc]; } -:orr^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:orr^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -4346,7 +4346,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) -:pkhbt^COND Rd,rn,shift4 is $(AMODE) & COND & c2027=0x68 & c0406=1 & Rd & rn & shift4 +:pkhbt^COND Rd,rn,shift4 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0406=1 & Rd & rn & shift4 { build COND; build rn; @@ -4354,7 +4354,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate Rd = (rn & 0xffff) + (shift4 & 0xffff0000); } -:pkhtb^COND Rd,rn,shift4 is $(AMODE) & COND & c2027=0x68 & c0406=5 & Rd & rn & shift4 +:pkhtb^COND Rd,rn,shift4 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0406=5 & Rd & rn & shift4 { build COND; build rn; @@ -4366,7 +4366,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5E) -:qadd^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x10 & Rn & Rd & c0811=0 & c0407=5 & Rm +:qadd^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x10 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; local sum1 = Rm + Rn; @@ -4379,7 +4379,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) -:qadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=1 & Rn & Rd & Rm +:qadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=1 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -4393,7 +4393,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate Rd = (zext(sum2) << 16) | zext(sum1); } -:qadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=9 & Rn & Rd & Rm +:qadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=9 & Rn & Rd & Rm { build COND; local rn1 = Rn & 0xff; @@ -4416,7 +4416,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate } # qaddsubx -:qasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=3 & Rn & Rd & Rm +:qasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=3 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -4434,7 +4434,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5E) -:qdadd^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x14 & Rn & Rd & c0811=0 & c0407=5 & Rm +:qdadd^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x14 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; tmp:4 = Rn * 
2; @@ -4446,7 +4446,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate Rd = tmp; } -:qdsub^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x16 & Rn & Rd & c0811=0 & c0407=5 & Rm +:qdsub^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x16 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; tmp:4 = Rn * 2; @@ -4463,7 +4463,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) # qsubaddx -:qsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=5 & Rn & Rd & Rm +:qsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=5 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -4481,7 +4481,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_5E) -:qsub^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=0x12 & Rn & Rd & c0811=0 & c0407=5 & Rm +:qsub^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; tmp:4 = Rm - Rn; @@ -4494,7 +4494,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate @if defined(VERSION_6) -:qsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=7 & Rn & Rd & Rm +:qsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=7 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -4508,7 +4508,7 @@ ArmPCRelImmed12: reloff is U23=0 & immed & rotate Rd = (zext(sum2) << 16) | zext(sum1); } -:qsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x62 & c0811=15 & c0407=15 & Rn & Rd & Rm +:qsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=15 & Rn & Rd & Rm { build COND; local rn1 = Rn & 0xff; @@ -4557,7 +4557,7 @@ macro BitReverse_arm(val) { } -:rbit^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=3 & c1619=15 & c0811=15 & Rd & rm +:rbit^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=3 & c1619=15 & c0811=15 & Rd & rm { build COND; build rm; @@ -4580,7 +4580,7 @@ macro BitReverse_arm(val) { @if defined(VERSION_6) -:rev^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=3 & c1619=15 & c0811=15 & Rd & rm +:rev^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=3 & c1619=15 & c0811=15 & Rd & rm { build COND; build rm; @@ -4591,7 +4591,7 @@ macro BitReverse_arm(val) { Rd = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; } -:rev16^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=11 & Rd & rm +:rev16^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=11 & Rd & rm { build COND; build rm; @@ -4602,7 +4602,7 @@ macro BitReverse_arm(val) { Rd = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; } -:revsh^COND Rd, rm is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=11 & Rd & rm +:revsh^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=11 & Rd & rm { build COND; build rm; @@ -4614,7 +4614,7 @@ macro BitReverse_arm(val) { @endif # VERSION_6 -:rsb^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:rsb^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -4625,7 +4625,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:rsb^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:rsb^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -4636,7 +4636,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } 
-:rsb^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:rsb^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -4647,7 +4647,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:rsb^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:rsb^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -4660,7 +4660,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:rsb^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:rsb^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -4673,7 +4673,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:rsb^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:rsb^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -4686,7 +4686,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:rsc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:rsc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -4697,7 +4697,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:rsc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:rsc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -4708,7 +4708,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:rsc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:rsc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -4719,7 +4719,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:rsc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:rsc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -4732,7 +4732,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:rsc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:rsc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -4745,7 +4745,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:rsc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:rsc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -4760,7 +4760,7 @@ macro BitReverse_arm(val) { @if defined(VERSION_6) -:sadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=1 & Rn & Rd & Rm +:sadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=1 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xffff; @@ -4776,7 +4776,7 @@ macro BitReverse_arm(val) { Rd = ((sum2 & 0xffff) << 16) | (sum1 & 0xffff); } -:sadd8^COND 
Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=9 & Rn & Rd & Rm +:sadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=9 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xff; @@ -4799,7 +4799,7 @@ macro BitReverse_arm(val) { } # saddsubx -:sasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=3 & Rn & Rd & Rm +:sasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=3 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -4818,7 +4818,7 @@ macro BitReverse_arm(val) { @endif # VERSION_6 -:sbc^SBIT_CZNO^COND Rd,rn,shift1 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:sbc^SBIT_CZNO^COND Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -4829,7 +4829,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:sbc^SBIT_CZNO^COND Rd,rn,shift2 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:sbc^SBIT_CZNO^COND Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -4840,7 +4840,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:sbc^SBIT_CZNO^COND Rd,rn,shift3 is $(AMODE) & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:sbc^SBIT_CZNO^COND Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -4851,7 +4851,7 @@ macro BitReverse_arm(val) { build SBIT_CZNO; } -:sbc^SBIT_CZNO^COND pc,rn,shift1 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:sbc^SBIT_CZNO^COND pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -4864,7 +4864,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:sbc^SBIT_CZNO^COND pc,rn,shift2 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:sbc^SBIT_CZNO^COND pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -4877,7 +4877,7 @@ macro BitReverse_arm(val) { goto [pc]; } -:sbc^SBIT_CZNO^COND pc,rn,shift3 is $(AMODE) & pc & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:sbc^SBIT_CZNO^COND pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -4894,7 +4894,7 @@ macro BitReverse_arm(val) { @if defined(VERSION_6T2) -:sbfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & COND & c2127=0x3d & widthMinus1 & Rd & lsbImm & c0406=5 & Rm +:sbfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & COND & ARMcond=1 & c2127=0x3d & widthMinus1 & Rd & lsbImm & c0406=5 & Rm { build COND; build lsbImm; @@ -4910,7 +4910,7 @@ macro BitReverse_arm(val) { @if defined(VERSION_7) # Warning: note the non-standard use of Rd, Rm, Rn -:sdiv^COND RdHi,RnLo,RmHi is $(AMODE) & COND & c2027=0x71 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo +:sdiv^COND RdHi,RnLo,RmHi is $(AMODE) & ARMcond=1 & COND & c2027=0x71 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo { build COND; local result = RnLo / RmHi; @@ -4919,7 +4919,7 @@ macro BitReverse_arm(val) { @endif # VERSION_7 -:sel^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x68 & Rn & Rd & c0811=15 & c0407=11 & Rm +:sel^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & Rn & Rd & c0811=15 & c0407=11 & Rm { build COND; local rD1 = ((zext(GE1) * Rn) + (zext(!GE1) * Rm)) & 0x0ff; @@ -4931,7 +4931,7 @@ macro BitReverse_arm(val) { @if 
defined(VERSION_6K) -:sev^COND is $(AMODE) & COND & c0027=0x320f004 +:sev^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f004 { build COND; SendEvent(); @@ -4946,7 +4946,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } :setend armEndianNess is $(AMODE) & (c0031=0xf1010000 | c0031=0xf1010200) & armEndianNess { setEndianState(armEndianNess); } -:shadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=1 & Rm +:shadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=1 & Rm { build COND; local tmpRn = Rn; @@ -4956,7 +4956,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (sum2 << 16) + (sum1 & 0xffff); } -:shadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=9 & Rm +:shadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=9 & Rm { build COND; local tmpRn = Rn; @@ -4975,7 +4975,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # shaddsubx -:shasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=3 & Rm +:shasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=3 & Rm { build COND; local tmpRn = Rn; @@ -4987,7 +4987,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # shsubbaddx -:shsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=5 & Rm +:shsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=5 & Rm { build COND; local tmpRn = Rn; @@ -4998,7 +4998,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd[16,16] = diff[1,16]; } -:shsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=7 & Rm +:shsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=7 & Rm { build COND; local tmpRn = Rn; @@ -5008,7 +5008,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (sum2 << 16) + (sum1 & 0xffff); } -:shsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=15 & Rm +:shsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=15 & Rm { build COND; local tmpRn = Rn; @@ -5030,7 +5030,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:smla^XBIT^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x10 & smRd & smRn & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRa +:smla^XBIT^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x10 & smRd & smRn & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRa { build COND; local tmp:4 = sext(XBIT) * sext(YBIT); @@ -5042,7 +5042,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:smlad^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & c0407=1 & smRd & smRa & smRm & smRn +:smlad^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=1 & smRd & smRa & smRm & smRn { build COND; local tmpRn = smRn; @@ -5058,7 +5058,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = smRa + tmpprod; } -:smladx^COND smRd, smRn, smRm, smRa is $(AMODE) & COND & c2027=0x70 & c0407=3 & smRd & smRn & smRm & smRa +:smladx^COND smRd, smRn, smRm, smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=3 & smRd & smRn & smRm & smRa { build COND; local tmpRn = smRn; @@ -5076,7 +5076,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @endif # VERSION_6 -:smlal^COND^SBIT_ZN 
RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2527=0 & c2124=7 & SBIT_ZN & RdLo & RdHi & smRn & c0407=9 & smRm +:smlal^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=7 & SBIT_ZN & RdLo & RdHi & smRn & c0407=9 & smRm { build COND; tmp:8 = (zext(RdHi) << 32) | zext(RdLo); @@ -5091,7 +5091,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:smlal^XBIT^YBIT^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x14 & RdLo & RdHi & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn +:smlal^XBIT^YBIT^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x14 & RdLo & RdHi & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn { build COND; local prod:8 = sext(XBIT) * sext(YBIT); @@ -5105,7 +5105,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:smlald^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=1 & smRn & smRm +:smlald^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=1 & smRn & smRm { build COND; local tmpRn = smRn; @@ -5119,7 +5119,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } RdHi = result(4); } -:smlaldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=3 & smRn & smRm +:smlaldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=3 & smRn & smRm { build COND; local tmpRn = smRn; @@ -5137,7 +5137,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:smlaw^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x12 & smRd & smRn & smRm & c0707=1 & YBIT & x=0 & c0404=0 & smRa +:smlaw^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & smRd & smRn & smRm & c0707=1 & YBIT & x=0 & c0404=0 & smRa { build COND; local tmp64:6 = sext(smRn) * sext(YBIT); @@ -5150,7 +5150,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:smlsd^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=0 & c0404=1 & smRm & smRa +:smlsd^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=0 & c0404=1 & smRm & smRa { build COND; local tmpRn = smRn; @@ -5164,7 +5164,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = smRa + diff; } -:smlsdx^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=1 & c0404=1 & smRm & smRa +:smlsdx^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=1 & c0404=1 & smRm & smRa { build COND; local tmpRn = smRn; @@ -5178,7 +5178,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = smRa + diff; } -:smlsld^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=0 & c0404=1 & smRn +:smlsld^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=0 & c0404=1 & smRn { build COND; local tmpRn = smRn; @@ -5192,7 +5192,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } RdHi = result(4); } -:smlsldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=1 & c0404=1 & smRn +:smlsldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=1 & c0404=1 & smRn { build COND; local tmpRn = smRn; @@ -5206,7 +5206,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } RdHi = 
result(4); } -:smmla^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=0 & c0404=1 & smRa +:smmla^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=0 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); @@ -5214,7 +5214,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = val(4); } -:smmlar^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=1 & c0404=1 & smRa +:smmlar^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=1 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); @@ -5222,7 +5222,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = val(4); } -:smmls^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=0 & c0404=1 & smRa +:smmls^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=0 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); @@ -5230,7 +5230,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = val(4); } -:smmlsr^COND smRd,smRn,smRm,smRa is $(AMODE) & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=1 & c0404=1 & smRa +:smmlsr^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=1 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); @@ -5238,21 +5238,21 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = val(4); } -:smmul^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=0 & c0404=1 & smRm +:smmul^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=0 & c0404=1 & smRm { build COND; val:8 = sext(smRn) * sext(smRm); smRd = val(4); } -:smmulr^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=1 & c0404=1 & smRm +:smmulr^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=1 & c0404=1 & smRm { build COND; val:8 = (sext(smRn) * sext(smRm)) + 0x080000000; smRd = val(4); } -:smuad^COND smRd, smRn, smRm is $(AMODE) & COND & c2027=0x70 & c0407=1 & smRd & c1619=15 & smRn & smRm +:smuad^COND smRd, smRn, smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=1 & smRd & c1619=15 & smRn & smRm { build COND; local tmpRm = smRm; @@ -5268,7 +5268,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = tmpprod; } -:smuadx^COND smRd, smRn, smRm is $(AMODE) & COND & c2027=0x70 & c0407=3 & smRd & c1619=15 & smRn & smRm +:smuadx^COND smRd, smRn, smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=3 & smRd & c1619=15 & smRn & smRm { build COND; local tmpRm = smRm; @@ -5288,7 +5288,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:smul^XBIT^YBIT^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x16 & smRd & c1215=0 & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn +:smul^XBIT^YBIT^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x16 & smRd & c1215=0 & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn { build COND; tmp:8 = sext(XBIT) * sext(YBIT); @@ -5297,7 +5297,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @endif # VERSION_5E -:smull^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & COND & c2527=0 & c2124=6 & SBIT_ZN & RdHi & RdLo & smRn & c0407=9 & smRm +:smull^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 
& COND & c2527=0 & c2124=6 & SBIT_ZN & RdHi & RdLo & smRn & c0407=9 & smRm { build COND; rn64:8 = sext(smRn); @@ -5311,7 +5311,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:smulw^YBIT^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x12 & smRd & c1215=0 & smRn & c0707=1 & YBIT & x=1 & c0404=0 & smRm +:smulw^YBIT^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & smRd & c1215=0 & smRn & c0707=1 & YBIT & x=1 & c0404=0 & smRm { build COND; tmp:6 = sext(smRn) * sext(YBIT); @@ -5323,7 +5323,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:smusd^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=0 & c0404=1 & smRn +:smusd^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=0 & c0404=1 & smRn { build COND; local tmpRn = smRn; @@ -5335,7 +5335,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } smRd = prod1 - prod2; } -:smusdx^COND smRd,smRn,smRm is $(AMODE) & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=1 & c0404=1 & smRn +:smusdx^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=1 & c0404=1 & smRn { build COND; local tmpRn = smRn; @@ -5348,7 +5348,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } -:ssat^COND Rd, sSatImm5, shift4 is $(AMODE) & COND & c2127=0x35 & c0405=1 & sSatImm5 & Rd & shift4 +:ssat^COND Rd, sSatImm5, shift4 is $(AMODE) & ARMcond=1 & COND & c2127=0x35 & c0405=1 & sSatImm5 & Rd & shift4 { build COND; build shift4; @@ -5357,7 +5357,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = tmp; } -:ssat16^COND Rd, sSatImm4, Rm is $(AMODE) & COND & c2027=0x6a & c0811=15 & c0407=0x3 & sSatImm4 & Rd & Rm +:ssat16^COND Rd, sSatImm4, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x6a & c0811=15 & c0407=0x3 & sSatImm4 & Rd & Rm { build COND; build sSatImm4; @@ -5370,7 +5370,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # ssubaddx -:ssax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=5 & Rn & Rd & Rm +:ssax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=5 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -5386,7 +5386,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = ((diff & 0xffff) << 16) | (sum & 0xffff); } -:ssub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=7 & Rn & Rd & Rm +:ssub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=7 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; @@ -5402,7 +5402,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = ((diffu & 0xffff) << 16) | (diffl & 0xffff); } -:ssub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x61 & c0811=15 & c0407=15 & Rn & Rd & Rm +:ssub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=15 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xff; @@ -5426,7 +5426,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @endif # VERSION_6 -:stc^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 +:stc^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 { build COND; build addrmode5; @@ -5434,7 +5434,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } coprocessor_store(t_cpn,CRd,addrmode5); } -:stcl^COND cpn,CRd,addrmode5 is $(AMODE) & COND & c2527=6 & addrmode5 & cpn & 
CRd & N22=1 & L20=0 +:stcl^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 { build COND; build addrmode5; @@ -5442,13 +5442,13 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } coprocessor_storelong(t_cpn,CRd,addrmode5); } -:stm^mdir^COND reglist is $(AMODE) & COND & c2527=4 & mdir & L20=0 & reglist +:stm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=0 & reglist { build COND; build reglist; } -#:str^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +#:str^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 #{ # build COND; # build addrmode2; @@ -5457,14 +5457,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } #} # The following form of str assumes alignment checking is on -:str^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:str^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; *addrmode2 = Rd; } -:strb^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:strb^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -5472,7 +5472,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } *addrmode2 = tmpRd:1; } -:strbt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 &P24=0 & B22=1 & W21=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:strbt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & P24=0 & B22=1 & W21=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; @@ -5480,7 +5480,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } *addrmode2 = tmpRd:1; } -:strh^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & L20=0 & c0407=11 & Rd & addrmode3 +:strh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=0 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; @@ -5490,7 +5490,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_5E) -:strd^COND Rd,Rd2,addrmode3 is $(AMODE) & COND & c2527=0 & c0407=0xf & L20=0 & Rd & Rd2 & addrmode3 +:strd^COND Rd,Rd2,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & c0407=0xf & L20=0 & Rd & Rd2 & addrmode3 { build COND; build addrmode3; @@ -5504,7 +5504,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:strex^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x18 & c0411=0xf9 & Rn & Rd & Rm +:strex^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x18 & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; @@ -5519,7 +5519,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6K) -:strexb^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x1c & c0411=0xf9 & Rn & Rd & Rm +:strexb^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1c & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; @@ -5531,7 +5531,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } *tmp = tmpRm:1; } -:strexd^COND Rd,Rm,Rm2,[Rn] is $(AMODE) & COND & c2027=0x1a & Rn & Rd & c0411=0xf9 & c0003 & Rm & Rm2 +:strexd^COND Rd,Rm,Rm2,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & Rn & Rd & c0411=0xf9 & c0003 & Rm & Rm2 { build COND; local addr = Rn; @@ -5544,7 
+5544,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } *(addr) = Rm2; } -:strexh^COND Rd,Rm,[Rn] is $(AMODE) & COND & c2027=0x1e & c0411=0xf9 & Rn & Rd & Rm +:strexh^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1e & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; @@ -5556,14 +5556,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } *tmp = tmpRm:2; } -:strht^COND Rd,addrmode3 is $(AMODE) & COND & c2527=0 & P24=0 & W21=1 & L20=0 & c0407=11 & Rd & addrmode3 { +:strht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=0 & c0407=11 & Rd & addrmode3 { build COND; *:2 addrmode3 = Rd; } @endif # VERSION_6K -#:strt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & addrmode2 +#:strt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & addrmode2 #{ # build COND; # build addrmode2; @@ -5572,14 +5572,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } #} # The following form of str assumes alignment checking is on -:strt^COND Rd,addrmode2 is $(AMODE) & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 +:strt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; *addrmode2 = Rd; } -:sub^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 +:sub^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; @@ -5590,7 +5590,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } build SBIT_CZNO; } -:sub^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 +:sub^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; @@ -5601,7 +5601,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } build SBIT_CZNO; } -:sub^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 +:sub^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; @@ -5612,7 +5612,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } build SBIT_CZNO; } -:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 +:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; @@ -5627,7 +5627,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } goto [pc]; } -:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & Rn=14 & I25=1 & immed=0 & rotate=0 & c2627=0 & shift1 +:sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & Rn=14 & I25=1 & immed=0 & rotate=0 & c2627=0 & shift1 { build COND; build rn; @@ -5641,7 +5641,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } return [pc]; } -:sub^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 +:sub^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; @@ -5656,7 +5656,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } goto 
[pc]; } -:sub^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 +:sub^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; @@ -5671,14 +5671,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } goto [pc]; } -:swi^COND immed24 is $(AMODE) & COND & c2427=15 & immed24 +:swi^COND immed24 is $(AMODE) & ARMcond=1 & COND & c2427=15 & immed24 { build COND; tmp:4 = immed24; software_interrupt(tmp); } -#:swp^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm +#:swp^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm #{ # build COND; # tmp = Rn & 0xfffffffc; @@ -5690,7 +5690,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } #} # Assuming alignment checking is enabled -:swp^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm +:swp^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm { build COND; val:4 = *Rn; @@ -5698,7 +5698,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = val; } -:swpb^COND Rd,Rm,Rn is $(AMODE) & COND & c2027=20 & Rn & Rd & c0811=0 & c0407=9 & Rm +:swpb^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=20 & Rn & Rd & c0811=0 & c0407=9 & Rm { build COND; local tmp = *:1 Rn; @@ -5709,14 +5709,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:sxtab^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=2 & c0407=7 & Rd & Rn & ror1 +:sxtab^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=2 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + sext(ror1:1); } -:sxtab16^COND Rd,Rn,ror1 is $(AMODE) & COND & c2027=0x68 & c0407=7 & Rn & Rd & ror1 +:sxtab16^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0407=7 & Rn & Rd & ror1 { build COND; build ror1; @@ -5727,21 +5727,21 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(hi) << 16) + zext(lo); } -:sxtah^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=7 & Rd & Rn & ror1 +:sxtah^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + sext(ror1:2); } -:sxtb^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=2 & c0407=7 & Rd & c1619=15 & ror1 +:sxtb^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=2 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; Rd = sext(ror1:1); } -:sxtb16^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=0 & c0407=7 & Rd & c1619=15 & ror1 +:sxtb16^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=0 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; @@ -5752,7 +5752,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(high) << 16) | zext(low); } -:sxth^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=3 & c0407=7 & Rd & c1619=15 & ror1 +:sxth^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; @@ -5761,7 +5761,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @endif # VERSION_6 -:teq^COND rn,shift1 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift1 +:teq^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; @@ -5772,7 +5772,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } 
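# A minimal sketch of the disambiguation pattern applied throughout these
# hunks (illustrative only; ARMcond's exact definition lives in ARM.sinc and
# is assumed here to resolve to 1 for the conditional encoding space, cond<15,
# and to 0 for the cond=15 space): each conditional ARM constructor gains
# ARMcond=1 alongside COND, while each cond=15 (unconditional/Advanced SIMD)
# constructor gains ARMcond=0, so the two encoding spaces can no longer
# produce conflicting matches for the same 32-bit instruction word.
# For example, taken from the constraints in this patch:
#
#   :tst^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=17 & ...    # conditional space
#   :vand Dd,Dn,Dm is $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & ...       # cond=15 space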
-:teq^COND rn,shift2 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift2 +:teq^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; @@ -5783,7 +5783,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:teq^COND rn,shift3 is $(AMODE) & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift3 +:teq^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; @@ -5794,7 +5794,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:teq^COND^"p" rn,shift1 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift1 +:teq^COND^"p" rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift1 { build COND; build rn; @@ -5805,7 +5805,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:teq^COND^"p" rn,shift2 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift2 +:teq^COND^"p" rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift2 { build COND; build rn; @@ -5816,7 +5816,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:teq^COND^"p" rn,shift3 is $(AMODE) & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift3 +:teq^COND^"p" rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift3 { build COND; build rn; @@ -5828,7 +5828,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } -:tst^COND rn,shift1 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift1 +:tst^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; @@ -5839,7 +5839,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:tst^COND rn,shift2 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift2 +:tst^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; @@ -5850,7 +5850,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } affectflags(); } -:tst^COND rn,shift3 is $(AMODE) & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift3 +:tst^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; @@ -5863,7 +5863,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:uadd16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=1 & Rd & rn & rm +:uadd16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=1 & Rd & rn & rm { build COND; build rn; @@ -5885,7 +5885,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = zext(tmpHigh) << 16 | zext(tmpLow); } -:uadd8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=9 & Rd & rn & rm +:uadd8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=9 & Rd & rn & rm { build COND; build rn; @@ -5912,7 +5912,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # uaddsubx -:uasx^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=3 & Rd & rn & rm +:uasx^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=3 & Rd & rn & rm { build COND; build rn; @@ -5938,7 +5938,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6T2) -:ubfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & COND & c2127=0x3f & widthMinus1 & Rd & lsbImm & c0406=5 & Rm 
+:ubfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & ARMcond=1 & COND & c2127=0x3f & widthMinus1 & Rd & lsbImm & c0406=5 & Rm { build COND; build lsbImm; @@ -5953,7 +5953,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_7) -:udiv^COND RdHi,RnLo,RmHi is $(AMODE) & COND & c2027=0x73 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo +:udiv^COND RdHi,RnLo,RmHi is $(AMODE) & ARMcond=1 & COND & c2027=0x73 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo { build COND; result:8 = zext(RnLo) / zext(RmHi); @@ -5964,7 +5964,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:uhadd16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=1 & Rd & rn & rm +:uhadd16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=1 & Rd & rn & rm { build COND; build rn; @@ -5979,7 +5979,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd[16,16] = tmpHigh[1,16]; } -:uhadd8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=9 & Rd & rn & rm +:uhadd8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=9 & Rd & rn & rm { build COND; build rn; @@ -6002,7 +6002,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # uhaddsubx -:uhasx^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=3 & Rd & rn & rm +:uhasx^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=3 & Rd & rn & rm { build COND; build rn; @@ -6019,7 +6019,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # uhsubaddx -:uhsax^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=5 & Rd & rn & rm +:uhsax^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=5 & Rd & rn & rm { build COND; build rn; @@ -6035,7 +6035,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (tmpHigh << 16) | tmpLow; } -:uhsub16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=7 & Rd & rn & rm +:uhsub16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=7 & Rd & rn & rm { build COND; build rn; @@ -6051,7 +6051,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (tmpHigh << 16) | tmpLow; } -:uhsub8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=7 & c0811=15 & c0407=15 & Rd & rn & rm +:uhsub8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=15 & Rd & rn & rm { build COND; build rn; @@ -6073,7 +6073,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (b4 << 24) | (b3 << 16) | (b2 << 8) | b1; } -:umaal^COND RdLo,RdHi,Rm,Rs is $(AMODE) & COND & c2027=0x04 & RdHi & RdLo & Rs & c0407=9 & Rm +:umaal^COND RdLo,RdHi,Rm,Rs is $(AMODE) & ARMcond=1 & COND & c2027=0x04 & RdHi & RdLo & Rs & c0407=9 & Rm { build COND; result:8 = (zext(Rm) * zext(Rs)) + zext(RdLo) + zext(RdHi); @@ -6083,7 +6083,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @endif # VERSION_6 -:umlal^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=5 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm +:umlal^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=5 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm { build COND; build rm; @@ -6098,7 +6098,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } build SBIT_ZN; } -:umull^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & COND & c2527=0 & c2124=4 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm +:umull^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) 
& ARMcond=1 & COND & c2527=0 & c2124=4 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm { build COND; build rm; @@ -6114,7 +6114,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6) -:uqadd16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=1 & Rn & Rd & Rm +:uqadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=1 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6128,7 +6128,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(sum2) << 16) | zext(sum1); } -:uqadd8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=9 & Rn & Rd & Rm +:uqadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=9 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6149,7 +6149,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # uqaddsubx -:uqasx^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=3 & Rn & Rd & Rm +:uqasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=3 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6164,7 +6164,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # uqsubaddx -:uqsax^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=5 & Rn & Rd & Rm +:uqsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=5 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6178,7 +6178,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(sum2) << 16) | zext(sum1); } -:uqsub16^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=7 & Rn & Rd & Rm +:uqsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=7 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6192,7 +6192,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(sum2) << 16) | zext(sum1); } -:uqsub8^COND Rd, Rn, Rm is $(AMODE) & COND & c2027=0x66 & c0811=15 & c0407=15 & Rn & Rd & Rm +:uqsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=15 & Rn & Rd & Rm { build COND; local tmpRn = Rn; @@ -6212,7 +6212,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); } -:usad8^COND Rd, Rm, Rs is $(AMODE) & COND & c2027=0x78 & c1215=15 & c0407=1 & Rd & Rm & Rs +:usad8^COND Rd, Rm, Rs is $(AMODE) & ARMcond=1 & COND & c2027=0x78 & c1215=15 & c0407=1 & Rd & Rm & Rs { build COND; local tmpRs = Rs; @@ -6232,7 +6232,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); } -:usada8^COND Rd, Rm, Rs, Rn is $(AMODE) & COND & c2027=0x78 & c0407=1 & Rd & Rn& Rm & Rs +:usada8^COND Rd, Rm, Rs, Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x78 & c0407=1 & Rd & Rn& Rm & Rs { build COND; local tmpRs = Rs; @@ -6252,7 +6252,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = Rn + ((zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1)); } -:usat^COND Rd, uSatImm5, shift4 is $(AMODE) & COND & c2127=0x37 & c0405=0x1 & uSatImm5 & Rd & shift4 +:usat^COND Rd, uSatImm5, shift4 is $(AMODE) & ARMcond=1 & COND & c2127=0x37 & c0405=0x1 & uSatImm5 & Rd & shift4 { build COND; build uSatImm5; @@ -6262,7 +6262,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = tmp; } -:usat16^COND Rd, uSatImm4, Rm is $(AMODE) & COND & c2027=0x6e & c0811=15 & c0407=0x3 & uSatImm4 & Rd & Rm +:usat16^COND Rd, uSatImm4, Rm is $(AMODE) & 
ARMcond=1 & COND & c2027=0x6e & c0811=15 & c0407=0x3 & uSatImm4 & Rd & Rm { build COND; build uSatImm4; @@ -6275,7 +6275,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } } # usubaddx -:usax^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=5 & Rd & rn & rm +:usax^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=5 & Rd & rn & rm { build COND; build rn; @@ -6297,7 +6297,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = zext(tmpHigh) << 16 | zext(tmpLow); } -:usub16^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=7 & Rd & rn & rm +:usub16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=7 & Rd & rn & rm { build COND; build rn; @@ -6319,7 +6319,7 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = zext(tmpHigh) << 16 | zext(tmpLow); } -:usub8^COND Rd,rn,rm is $(AMODE) & COND & c2327=12 & c2022=5 & c0811=15 & c0407=15 & Rd & rn & rm +:usub8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=15 & Rd & rn & rm { build COND; build rn; @@ -6345,14 +6345,14 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); } -:uxtab^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=6 & c0407=7 & Rd & Rn & ror1 +:uxtab^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=6 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + zext(ror1:1); } -:uxtab16^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=4 & c0407=7 & Rd & Rn & ror1 +:uxtab16^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=4 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; @@ -6363,28 +6363,28 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } Rd = (tmp2n << 16) | tmp1n; } -:uxtah^COND Rd,Rn,ror1 is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=7 & Rd & Rn & ror1 +:uxtah^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + zext(ror1:2); } -:uxtb^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=6 & c0407=7 & Rd & c1619=15 & ror1 +:uxtb^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=6 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; Rd = ror1 & 0x0ff; } -:uxtb16^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=4 & c0407=7 & Rd & c1619=15 & ror1 +:uxtb16^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=4 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; Rd = ror1 & 0x0ff00ff; } -:uxth^COND Rd,ror1 is $(AMODE) & COND & c2327=13 & c2022=7 & c0407=7 & Rd & c1619=15 & ror1 +:uxth^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; @@ -6397,19 +6397,19 @@ armEndianNess: "BE" is c0031=0xf1010200 { export 1:1; } @if defined(VERSION_6K) -:wfe^COND is $(AMODE) & COND & c0027=0x320f002 +:wfe^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f002 { build COND; WaitForEvent(); } -:wfi^COND is $(AMODE) & COND & c0027=0x320f003 +:wfi^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f003 { build COND; WaitForInterrupt(); } -:yield^COND is $(AMODE) & COND & c0027=0x320f001 +:yield^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f001 { build COND; HintYield(); diff --git a/Ghidra/Processors/ARM/data/languages/ARMneon.sinc b/Ghidra/Processors/ARM/data/languages/ARMneon.sinc index 975155ea5d..7d364c7714 100644 --- 
a/Ghidra/Processors/ARM/data/languages/ARMneon.sinc +++ b/Ghidra/Processors/ARM/data/languages/ARMneon.sinc @@ -356,10 +356,10 @@ fesize2323: "32" is TMode=0 & c2323=0 { export 2:4; } fesize2323: "16" is TMode=1 & thv_c2323=1 { export 4:4; } fesize2323: "32" is TMode=1 & thv_c2323=0 { export 2:4; } -fesize2021: "16" is TMode=0 & c2020=1 { export 4:4; } -fesize2021: "32" is TMode=0 & c2020=0 { export 2:4; } -fesize2021: "16" is TMode=1 & thv_c2020=1 { export 4:4; } -fesize2021: "32" is TMode=1 & thv_c2020=0 { export 2:4; } +fesize2020: "16" is TMode=0 & c2020=1 { export 4:4; } +fesize2020: "32" is TMode=0 & c2020=0 { export 2:4; } +fesize2020: "16" is TMode=1 & thv_c2020=1 { export 4:4; } +fesize2020: "32" is TMode=1 & thv_c2020=0 { export 2:4; } fesize1819: "16" is TMode=0 & c1819=1 { export 4:4; } fesize1819: "32" is TMode=0 & c1819=2 { export 2:4; } @@ -497,7 +497,7 @@ define pcodeop AESInvShiftRows; define pcodeop AESInvSubBytes; # F6.1.1 p3235 A1/T1 :aesd.8 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001101 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001101 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001101 & thv_c0404=0)) & Qd & Qm { @@ -512,7 +512,7 @@ define pcodeop AESShiftRows; define pcodeop AESSubBytes; # F6.1.2 p3237 A1/T1 :aese.8 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001100 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001100 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001100 & thv_c0404=0)) & Qd & Qm { @@ -526,7 +526,7 @@ define pcodeop AESSubBytes; define pcodeop AESInvMixColumns; # F6.1.3 p3239 A1/T1 :aesimc.8 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001111 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001111 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001111 & thv_c0404=0)) & Qd & Qm { @@ -539,7 +539,7 @@ define pcodeop AESInvMixColumns; define pcodeop AESMixColumns; # F6.1.4 p3240 A1/T1 :aesmc.8 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001110 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001110 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001110 & thv_c0404=0)) & Qd & Qm { @@ -552,7 +552,7 @@ define pcodeop AESMixColumns; define pcodeop SHA1HashUpdateChoose; # F6.1.7 p3248 A1/T1 :sha1c.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -567,7 +567,7 @@ define pcodeop SHA1HashUpdateChoose; # F6.1.8 p3250 A1/T1 :sha1h.32 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b01 & c0611=0b001011 & c0404=0) + 
is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b01 & c0611=0b001011 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b01 & thv_c0611=0b001011 & thv_c0404=0)) & Qd & Qm { @@ -581,7 +581,7 @@ define pcodeop SHA1HashUpdateChoose; define pcodeop SHA1HashUpdateMajority; # F6.1.9 p3251 A1/T1 :sha1m.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -597,7 +597,7 @@ define pcodeop SHA1HashUpdateMajority; define pcodeop SHA1HashUpdateParity; # F6.1.10 p3253 A1/T1 :sha1p.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -612,7 +612,7 @@ define pcodeop SHA1HashUpdateParity; # F6.1.11 p3255 A1/T1 :sha1su0.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b11 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b11 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -630,7 +630,7 @@ define pcodeop SHA1HashUpdateParity; # F6.1.12 p3257 A1/T1 :sha1su1.32 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001110 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001110 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001110 & thv_c0404=0)) & Qd & Qm { @@ -654,7 +654,7 @@ define pcodeop SHA1HashUpdateParity; define pcodeop SHA256hash; # F6.1.13 p3259 A1/T1 :sha256h.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -667,7 +667,7 @@ define pcodeop SHA256hash; # F6.1.14 p3260 A1/T1 :sha256h2.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -681,7 +681,7 @@ define pcodeop SHA256hash; define pcodeop SHA256ScheduleUpdate0; # F6.1.15 p3261 A1/T1 :sha256su0.32 Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001111 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001111 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001111 & thv_c0404=0)) & Qd & Qm { @@ -694,7 
+694,7 @@ define pcodeop SHA256ScheduleUpdate0; define pcodeop SHA256ScheduleUpdate1; # F6.1.16 p3263 A1/T1 :sha256su1.32 Qd,Qn,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { @@ -703,77 +703,76 @@ define pcodeop SHA256ScheduleUpdate1; # TODO: watch out for c2021=3 -:vaba.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=1) | +:vaba.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1 ) ) & Dm & Dn & Dd & udt & esize2021 { Dd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); } -:vaba.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=1) | +:vaba.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1 ) ) & Qd & Qn & Qm & udt & esize2021 { Qd = VectorAbsoluteDifferenceAndAccumulate(Qn,Qm,esize2021,udt); } -:vabal.^udt^esize2021 Qd,Dn,Dm is (($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=5 & Q6=0 & c0404=0) | +:vabal.^udt^esize2021 Qd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=5 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=5 & thv_c0606=0 & thv_c0404=0 ) ) & Qd & Dm & Dn & udt & esize2021 { Qd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); } -:vabd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=0) | +:vabd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Dd & udt & esize2021 { Dd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); } -:vabd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=0) | +:vabd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_Q6=1 & thv_c0404=0 ) ) & Qd & Qn & Qm & udt & esize2021 { Qd = VectorAbsoluteDifference(Qn,Qm,esize2021,udt); } -:vabdl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=7 & Q6=0 & c0404=0 ) | +:vabdl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=7 & Q6=0 & c0404=0 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); } -:vabd.f^fesize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=6 & c0811=13 & Q6=0 & c0404=0 ) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=0 & thv_c0404=0 ) ) & fesize2021 & Dd & Dm & Dn +:vabd.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=13 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=0 & thv_c0404=0 ) ) & fesize2020 & Dd 
& Dm & Dn { - Dd = FloatVectorAbsoluteDifference(Dn,Dm,fesize2021); + Dd = FloatVectorAbsoluteDifference(Dn,Dm,fesize2020); } -:vabd.f^fesize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=6 & c0811=13 & Q6=1 & c0404=0 ) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=1 & thv_c0404=0 ) ) & fesize2021 & Qd & Qm & Qn +:vabd.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=13 & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=1 & thv_c0404=0 ) ) & fesize2020 & Qd & Qm & Qn { - Qd = FloatVectorAbsoluteDifference(Qn,Qm,fesize2021); + Qd = FloatVectorAbsoluteDifference(Qn,Qm,fesize2020); } -:vabs.s^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=0 & c0404=0 ) | +:vabs.s^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorAbsolute(Dm,esize1819); } -:vabs.s^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=1 & c0404=0 ) | +:vabs.s^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorAbsolute(Qm,esize1819); } - -:vabs.f^fesize2021 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1819=2 & c1617=1 & c0711=0xe & Q6=0 & c0404=0 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xe & thv_c0606=0 & thv_c0404=0 ) ) & fesize2021 & Dm & Dd +:vabs.f^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xe & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xe & thv_c0606=0 & thv_c0404=0 ) ) & esize1819 & Dm & Dd { - Dd = FloatVectorAbsolute(Dm,fesize2021); + Dd = FloatVectorAbsolute(Dm,esize1819); } -:vabs.f^fesize2021 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1819=2 & c1617=1 & c0711=0xe & Q6=1 & c0404=0 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xe & thv_c0606=1 & thv_c0404=0 ) ) & fesize2021 & Qd & Qm +:vabs.f^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xe & Q6=1 & c0404=0 ) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xe & thv_c0606=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm { - Qd = FloatVectorAbsolute(Qm,fesize2021); + Qd = FloatVectorAbsolute(Qm,esize1819); } @endif # SIMD @@ -818,117 +817,117 @@ define pcodeop VectorGetElement; @if defined(SIMD) -:vacge.f^fesize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=6 & c2020=0 & c0811=14 & Q6=0 & c0404=1 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c2020=0 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2021 & Dn & Dd & Dm +:vacge.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2020 & Dn & Dd & Dm { - Dd = FloatCompareGE(Dn,Dm,fesize2021); + Dd = FloatCompareGE(Dn,Dm,fesize2020); } -:vacge.f^fesize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=6 & c2020=0 & c0811=14 & Q6=1 & 
c0404=1 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c2020=0 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2021 & Qn & Qd & Qm +:vacge.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=1 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2020 & Qn & Qd & Qm { - Qd = FloatCompareGE(Qn,Qm,fesize2021); + Qd = FloatCompareGE(Qn,Qm,fesize2020); } -:vacgt.f^fesize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=6 & c2020=1 & c0811=14 & Q6=0 & c0404=1 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c2020=1 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2021 & Dn & Dd & Dm +:vacgt.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2020 & Dn & Dd & Dm { - Dd = FloatCompareGT(Dn,Dm,fesize2021); + Dd = FloatCompareGT(Dn,Dm,fesize2020); } -:vacgt.f^fesize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=6 & c2020=1 & c0811=14 & Q6=1 & c0404=1 ) | - ($(TMODE_F) & thv_c2327=0x1f & thv_c2020=1 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2021 & Qn & Qd & Qm +:vacgt.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=1 & c0404=1 ) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2020 & Qn & Qd & Qm { - Qd = FloatCompareGT(Qn,Qm,fesize2021); + Qd = FloatCompareGT(Qn,Qm,fesize2020); } -:vadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & Q6=0 & c0404=0) | +:vadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm { Dd = VectorAdd(Dn,Dm,esize2021); } -:vadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & Q6=1 & c0404=0) | +:vadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = VectorAdd(Qn,Qm,esize2021); } -:vadd.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vadd.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { - Dd = FloatVectorAdd(Dn,Dm,fesize2021); + Dd = FloatVectorAdd(Dn,Dm,fesize2020); } -:vadd.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & fesize2021 & Qn & Qd & Qm +:vadd.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qn & Qd & Qm { - Qd = FloatVectorAdd(Qn,Qm,fesize2021); + Qd = FloatVectorAdd(Qn,Qm,fesize2020); } -:vpadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=11 & Q6=0 & c0404=1) | +:vpadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=11 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & 
thv_c0811=11 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseAdd(Dn,Dm,esize2021); } -:vpadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=11 & Q6=1 & c0404=1) | +:vpadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=11 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=11 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd { Qd = VectorPairwiseAdd(Qn,Qm,esize2021); } -:vpadd.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm& Dn & Dd +:vpadd.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm& Dn & Dd { - Dd = FloatVectorPairwiseAdd(Dn,Dm,fesize2021:1); + Dd = FloatVectorPairwiseAdd(Dn,Dm,fesize2020:1); } -:vpmax.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | +:vpmax.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseMax(Dn,Dm,esize2021,udt); } -:vpmax.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=15 & Q6=0 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vpmax.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { - Dd = FloatVectorPairwiseMax(Dn,Dm,fesize2021:1); + Dd = FloatVectorPairwiseMax(Dn,Dm,fesize2020:1); } -:vpmin.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=1) | +:vpmin.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseMin(Dn,Dm,esize2021,udt); } -:vpmin.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=1 & c0811=15 & Q6=0 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vpmin.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=15 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { - Dd = FloatVectorPairwiseMin(Dn,Dm,fesize2021); + Dd = FloatVectorPairwiseMin(Dn,Dm,fesize2020); } -:vpadal.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=0 & c0404=0) | +:vpadal.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm { Dd = VectorPairwiseAddAccumulateLong(Dm,esize1819); } -:vpadal.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=1 & c0404=0) | 
+:vpadal.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm { Qd = VectorPairwiseAddAccumulateLong(Qm,esize1819); } -:vpaddl.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=0 & c0404=0) | +:vpaddl.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm { Dd = VectorPairwiseAddLong(Dm,esize1819); } -:vpaddl.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=1 & c0404=0) | +:vpaddl.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm { Qd = VectorPairwiseAddLong(Qm,esize1819); @@ -989,104 +988,104 @@ define pcodeop VectorComplexMultiplyAccumulateByElement; @if defined(SIMD) -:vaddhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=4 & c0606=0 & c0404=0) | +:vaddhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=4 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & esize2021x2 & Qn & Dd & Qm { Dd = VectorAddReturnHigh(Qn,Qm,esize2021x2); } -:vaddl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0 & c0606=0 & c0404=0) | +:vaddl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm { Qd = VectorAdd(Dn,Dm,esize2021,udt); } -:vaddw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=1 & c0606=0 & c0404=0) | +:vaddw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=1 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=1 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm { Qd = VectorAdd(Qn,Dm,esize2021,udt); } -:vand Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | +:vand Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn & Dm; } -:vand Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | +:vand Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qn & Qd & Qm { Qd = Qn & Qm; } -:vbic.i32 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1111=0 & c0808=1 & c0407=3 ) | +:vbic.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1111=0 & c0808=1 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1111=0 & thv_c0808=1 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = Dd & ~simdExpImm_8; } -:vbic.i32 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & 
c1921=0 & c1111=0 & c0808=1 & c0407=7 ) | +:vbic.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1111=0 & c0808=1 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1111=0 & thv_c0808=1 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = Qd & ~simdExpImm_16; } -:vbic.i16 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=3 ) | +:vbic.i16 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=1 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = Dd & ~simdExpImm_8; } -:vbic.i16 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=7 ) | +:vbic.i16 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=1 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = Qd & ~simdExpImm_16; } -:vbic Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | +:vbic Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = Dn & ~Dm; } -:vbic Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | +:vbic Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = Qn & ~Qm; } -:vbif Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=0 & c0404=1 ) | +:vbif Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = VectorBitwiseInsertIfFalse(Dd,Dn,Dm); } -:vbif Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=1 & c0404=1 ) | +:vbif Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseInsertIfFalse(Qd,Qn,Qm); } -:vbit Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=0 & c0404=1 ) | +:vbit Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd { Dd = VectorBitwiseInsertIfTrue(Dd,Dn,Dm); } -:vbit Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=1 & c0404=1 ) | +:vbit Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseInsertIfTrue(Qd,Qn,Qm); } -:vbsl Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | +:vbsl Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd { Dd = VectorBitwiseSelect(Dd,Dn,Dm); } -:vbsl Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | +:vbsl Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & 
cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseSelect(Qd,Qn,Qm); @@ -1096,230 +1095,230 @@ crot2424: "#"^90 is ($(AMODE) & c2424=0 ) | (TMode=1 & thv_c2424=0) { local tmp crot2424: "#"^270 is ($(AMODE) & c2424=1 ) | (TMode=1 & thv_c2424=1) { local tmp:4 = 270; export tmp; } -:vcadd.f^fesize2021 Dd,Dn,Dm,crot2424 is ( ($(AMODE) & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=0 & c0404=1 ) | - ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2121=0 & thv_c0811=8 & thv_Q6=0 & thv_c0404=1)) & crot2424 & fesize2021 & Dm & Dn & Dd +:vcadd.f^fesize2020 Dd,Dn,Dm,crot2424 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=0 & c0404=1 ) | + ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2121=0 & thv_c0811=8 & thv_Q6=0 & thv_c0404=1)) & crot2424 & fesize2020 & Dm & Dn & Dd { - Dd = VectorComplexAdd(Dd,Dn,Dm,crot2424,fesize2021); + Dd = VectorComplexAdd(Dd,Dn,Dm,crot2424,fesize2020); } -:vcadd.f^fesize2021 Qd,Qn,Qm,crot2424 is ( ($(AMODE) & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=1 & c0404=1 )| - ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2021=0 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1)) & crot2424 & fesize2021 & Qm & Qn & Qd +:vcadd.f^fesize2020 Qd,Qn,Qm,crot2424 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=1 & c0404=1 )| + ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2021=0 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1)) & crot2424 & fesize2020 & Qm & Qn & Qd { - Qd = VectorComplexAdd(Qd,Qn,Qm,crot2424,fesize2021); + Qd = VectorComplexAdd(Qd,Qn,Qm,crot2424,fesize2020); } -:vceq.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=0 & c0404=1) | +:vceq.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd { Dd = VectorCompareEqual(Dn,Dm,esize2021); } -:vceq.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=1 & c0404=1) | +:vceq.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd { Qd = VectorCompareEqual(Qn,Qm,esize2021); } -:vceq.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=14 & Q6=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vceq.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { - Dd = FloatVectorCompareEqual(Dn,Dm,fesize2021); + Dd = FloatVectorCompareEqual(Dn,Dm,fesize2020); } -:vceq.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=14 & Q6=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2021 & Qm & Qn & Qd +:vceq.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=14 & Q6=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd { - Qd = FloatVectorCompareEqual(Qn,Qm,fesize2021); + Qd = FloatVectorCompareEqual(Qn,Qm,fesize2020); } -:vceq.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & 
c1819<3 & c1617=1 & c0711=2 & Q6=0 & c0404=0) | +:vceq.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero { Dd = VectorCompareEqual(Dm,zero,esize1819); } -:vceq.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=1 & c0404=0) | +:vceq.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero { Qd = VectorCompareEqual(Qm,zero,esize1819); } -:vceq.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=10 & Q6=0 & c0404=0) | +:vceq.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=10 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=10 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero { Dd = FloatVectorCompareEqual(Dm,zero,fesize1819); } -:vceq.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=10 & Q6=1 & c0404=0) | +:vceq.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=10 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=1 & thv_c0711=10 & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qm & Qd & zero { Qd = FloatVectorCompareEqual(Qm,zero,fesize1819); } -:vcge.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=1) | +:vcge.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=1) ) & udt & esize2021 & Dm & Dn & Dd { Dd = VectorCompareGreaterThanOrEqual(Dn,Dm,esize2021,udt); } -:vcge.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=1) | +:vcge.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd { Qd = VectorCompareGreaterThanOrEqual(Qn,Qm,esize2021,udt); } -:vcge.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=0 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vcge.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { Dd = FloatVectorCompareGreaterThanOrEqual(Dn,Dm,2:1,32:1); } -:vcge.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=1 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2021 & Qm & Qn & Qd +:vcge.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=14 & thv_Q6=1 & 
thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd { Qd = FloatVectorCompareGreaterThanOrEqual(Qn,Qm,2:1,32:1); } -:vcge.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=0 & c0404=0) | +:vcge.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero { Dd = VectorCompareGreaterThanOrEqual(Dm,zero,esize1819); } -:vcge.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=1 & c0404=0) | +:vcge.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero { Qd = VectorCompareGreaterThanOrEqual(Qm,zero,esize1819); } -:vcge.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=0 & c0404=0) | +:vcge.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=9 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero { Dd = FloatVectorCompareGreaterThanOrEqual(Dm,zero,fesize1819); } -:vcge.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=1 & c0404=0) | +:vcge.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=9 & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qm & Qd & zero { Qd = FloatVectorCompareGreaterThanOrEqual(Qm,zero,fesize1819); } -:vcgt.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=0) | +:vcgt.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dm & Dn & Dd { Dd = VectorCompareGreaterThan(Dn,Dm,esize2021); } -:vcgt.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=0) | +:vcgt.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=0) ) & udt & esize2021 & Qm & Qn & Qd { Qd = VectorCompareGreaterThan(Qn,Qm,esize2021); } -:vcgt.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=0 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2021 & Dm & Dn & Dd +:vcgt.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { - Dd = FloatVectorCompareGreaterThan(Dn,Dm,fesize2021); + Dd = FloatVectorCompareGreaterThan(Dn,Dm,fesize2020); } -:vcgt.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2121=1 & c0811=14 & 
Q6=1 & c0404=0) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2021 & Qm & Qn & Qd +:vcgt.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd { - Qd = FloatVectorCompareGreaterThan(Qn,Qm,fesize2021); + Qd = FloatVectorCompareGreaterThan(Qn,Qm,fesize2020); } -:vcgt.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=0 & c0404=0) | +:vcgt.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=0 & thv_c0404=0 ) ) & esize1819 & Dd & Dm & zero { Dd = VectorCompareGreaterThan(Dm,zero,esize1819); } -:vcgt.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=1 & c0404=0) | +:vcgt.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm & zero { Qd = VectorCompareGreaterThan(Qm,zero,esize1819); } -:vcgt.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=0 & c0404=0) | +:vcgt.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0 ) ) & fesize1819 & Dd & Dm & zero { Dd = FloatVectorCompareGreaterThan(Dm,zero,fesize1819); } -:vcgt.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=1 & c0404=0) | +:vcgt.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0 ) ) & fesize1819 & Qd & Qm & zero { Qd = FloatVectorCompareGreaterThan(Qm,zero,fesize1819); } -:vcle.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=0 & c0404=0) | +:vcle.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm & zero { Dd = VectorCompareGreaterThanOrEqual(zero,Dm,esize1819); } -:vcle.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=1 & c0404=0) | +:vcle.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm & zero { Qd = VectorCompareGreaterThanOrEqual(zero,Qm,esize1819); } -:vcle.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=0 & c0404=0) | +:vcle.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 
& c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xb & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dd & Dm & zero { Dd = FloatVectorCompareGreaterThanOrEqual(zero,Dm,fesize1819); } -:vcle.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=1 & c0404=0) | +:vcle.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xb & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qd & Qm & zero { Qd = FloatVectorCompareGreaterThanOrEqual(zero,Qm,fesize1819); } -:vcls.s^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=0 & c0404=0) | +:vcls.s^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm { Dd = VectorCountLeadingSignBits(Dm,esize1819); } -:vcls.s^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=1 & c0404=0) | +:vcls.s^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm { Qd = VectorCountLeadingSignBits(Qm,esize1819); } -:vclt.s^esize1819 Dd,Dm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=0 & c0404=0) | +:vclt.s^esize1819 Dd,Dm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero { Dd = VectorCompareGreaterThan(zero,Dm,esize1819); } -:vclt.s^esize1819 Qd,Qm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=1 & c0404=0) | +:vclt.s^esize1819 Qd,Qm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero { Qd = VectorCompareGreaterThan(zero,Qm,esize1819); } -:vclt.f^fesize1819 Dd,Dm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=0 & c0404=0) | +:vclt.f^fesize1819 Dd,Dm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=12 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero { Dd = FloatVectorCompareGreaterThan(zero,Dm,fesize1819); } -:vclt.f^fesize1819 Qd,Qm,zero is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=1 & c0404=0) | +:vclt.f^fesize1819 Qd,Qm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=12 & thv_Q6=1 & 
thv_c0404=0) ) & fesize1819 & Qm & Qd & zero { Qd = FloatVectorCompareGreaterThan(zero,Qm,fesize1819); } -:vclz.i^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=0 & c0404=0) | +:vclz.i^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=9 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm { Dd = VectorCountLeadingZeros(Dm,esize1819); } -:vclz.i^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=1 & c0404=0) | +:vclz.i^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=9 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm { Qd = VectorCountLeadingZeros(Qm,esize1819); @@ -1334,26 +1333,26 @@ crot2021: "#"^90 is ($(AMODE) & c2021=1 ) | (TMode=1 & thv_c2021=1) { local tmp: crot2021: "#"^180 is ($(AMODE) & c2021=2 ) | (TMode=1 & thv_c2021=2) { local tmp:4 = 180; export tmp; } crot2021: "#"^270 is ($(AMODE) & c2021=3 ) | (TMode=1 & thv_c2021=3) { local tmp:4 = 270; export tmp; } -:vcmla.f^fesize2021 Dd,Dn,Dm,crot2324 is ( ($(AMODE) & cond=15 & c2527=6 & c2323=1 & c2121=1 & c0811=8 & Q6=0 & c0404=0 ) | - ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2121=1 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2324 & fesize2021 & Dm & Dn & Dd +:vcmla.f^fesize2020 Dd,Dn,Dm,crot2324 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2121=1 & c0811=8 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2527=6 & thv_c2121=1 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2324 & fesize2020 & Dm & Dn & Dd { - Dd = VectorComplexMultiplyAccumulate(Dd,Dn,Dm,crot2324,fesize2021); + Dd = VectorComplexMultiplyAccumulate(Dd,Dn,Dm,crot2324,fesize2020); } -:vcmla.f^fesize2021 Qd,Qn,Qm,crot2324 is ( ($(AMODE) & cond=15 & c2527=6 & c2323=1 & c2121=1 & c0811=8 & Q6=1 & c0404=0 )| - ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2021=1 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2324 & fesize2021 & Qm & Qn & Qd +:vcmla.f^fesize2020 Qd,Qn,Qm,crot2324 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2121=1 & c0811=8 & Q6=1 & c0404=0 )| + ($(TMODE_F) & thv_c2527=6 & thv_c2021=1 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2324 & fesize2020 & Qm & Qn & Qd { - Qd = VectorComplexMultiplyAccumulate(Qd,Qn,Qm,crot2324,fesize2021); + Qd = VectorComplexMultiplyAccumulate(Qd,Qn,Qm,crot2324,fesize2020); } -:vcmla.f^fesize2323 Dd,Dn,Dm,crot2021 is ( ($(AMODE) & cond=15 & c2527=7 & c2424=0 & c2121=1 & c0811=8 & Q6=0 & c0404=0 ) | - ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c2121=1 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2021 & fesize2323 & Dm & Dn & Dd +:vcmla.f^fesize2323 Dd,Dn,Dm,crot2021 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=7 & c2424=0 & c0811=8 & Q6=0 & c0404=0 ) | + ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2021 & fesize2323 & Dm & Dn & Dd { Dd = VectorComplexMultiplyAccumulateByElement(Dd,Dn,Dm,crot2021,fesize2323); } -:vcmla.f^fesize2323 Qd,Qn,Qm,crot2021 is ( ($(AMODE) & cond=15 & c2527=7 & c2424=0 & c2121=1 & c0811=8 & Q6=1 & c0404=0 )| - ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c2021=1 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2021 & fesize2323 & Qm & Qn & Qd +:vcmla.f^fesize2323 Qd,Qn,Qm,crot2021 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=7 & c2424=0 & c0811=8 & 
Q6=1 & c0404=0 )| + ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2021 & fesize2323 & Qm & Qn & Qd { Qd = VectorComplexMultiplyAccumulateByElement(Qd,Qn,Qm,crot2021,fesize2323); } @@ -1453,67 +1452,67 @@ define pcodeop VectorUnsignedToFloat; # VCVT (between floating-point and integer, Advanced SIMD) # -:vcnt.8 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0711=10 & Q6=0 & c0404=0) | +:vcnt.8 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0711=10 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm { Dd = VectorCountOneBits(Dm,8:1,8:1); } -:vcnt.8 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=0 & c1617=0 & c0711=10 & Q6=1 & c0404=0) | +:vcnt.8 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819=0 & c1617=0 & c0711=10 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm { Qd = VectorCountOneBits(Qm,8:1,8:1); } @ifndef VERSION_8 -:vcvt.s16.f16 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | +:vcvt.s16.f16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToSigned(Dm,3:1); } -:vcvt.u16.f16 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | +:vcvt.u16.f16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToUnsigned(Dm,0:1); } -:vcvt.f16.s16 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=0 & c0404=0) | +:vcvt.f16.s16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorSignedToFloat(Dm,0:1); } -:vcvt.f16.u16 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | +:vcvt.f16.u16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorUnsignedToFloat(Dm,0:1); } -:vcvt.s32.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | +:vcvt.s32.f32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToSigned(Dm,3:1); } -:vcvt.u32.f32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | +:vcvt.u32.f32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToUnsigned(Dm,3:1); } -:vcvt.f32.s32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=0 & 
c0404=0) | +:vcvt.f32.s32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorSignedToFloat(Dm,0:1); } -:vcvt.f32.u32 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | +:vcvt.f32.u32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { @@ -1524,56 +1523,56 @@ define pcodeop VectorUnsignedToFloat; -:vcvt.s16.f16 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | +:vcvt.s16.f16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToSigned(Qm,6:1); } -:vcvt.u16.f16 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | +:vcvt.u16.f16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToUnsigned(Qm,7:1); } -:vcvt.f16.s16 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | +:vcvt.f16.s16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorSignedToFloat(Qm,4:1); } -:vcvt.f16.u16 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | +:vcvt.f16.u16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorUnsignedToFloat(Qm,5:1); } -:vcvt.s32.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | +:vcvt.s32.f32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToSigned(Qm,10:1); } -:vcvt.u32.f32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | +:vcvt.u32.f32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToUnsigned(Qm,11:1); } -:vcvt.f32.s32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | +:vcvt.f32.s32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorSignedToFloat(Qm,8:1); } -:vcvt.f32.u32 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | 
+:vcvt.f32.u32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { @@ -1736,86 +1735,86 @@ define pcodeop VectorUnsignedFixedToFloat; fbits: "#"val is TMode=0 & c1621 [ val = 64 - c1621; ] { tmp:1 = val; export tmp; } fbits: "#"val is TMode=1 & thv_c1621 [ val = 64 - thv_c1621; ] { tmp:1 = val; export tmp; } -:vcvt.s16.f16 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.s16.f16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorFloatToSignedFixed(Dm,fbits); } -:vcvt.u16.f16 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.u16.f16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorFloatToUnsignedFixed(Dm,fbits); } -:vcvt.f16.s16 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.f16.s16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorSignedFixedToFloat(Dm,fbits); } -:vcvt.f16.u16 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.f16.u16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorUnsignedFixedToFloat(Dm,fbits); } -:vcvt.s16.f16 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.s16.f16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToSignedFixed(Qm,fbits); } -:vcvt.u16.f16 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.u16.f16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToUnsignedFixed(Qm,fbits); } -:vcvt.f16.s16 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=0 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.f16.s16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & 
c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=0 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=0 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorSignedFixedToFloat(Qm,fbits); } -:vcvt.f16.u16 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=0 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.f16.u16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=0 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=0 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorUnsignedFixedToFloat(Qm,fbits); } -:vcvt.f32.s32 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.f32.s32 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorSignedFixedToFloat(Dm,fbits); } -:vcvt.f32.u32 Dd,Dm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | +:vcvt.f32.u32 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorUnsignedFixedToFloat(Dm,fbits); } -:vcvt.s32.f32 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.s32.f32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToSignedFixed(Qm,fbits); } -:vcvt.u32.f32 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.u32.f32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToUnsignedFixed(Qm,fbits); } -:vcvt.f32.s32 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.f32.s32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorSignedFixedToFloat(Qm,fbits); } -:vcvt.f32.u32 Qd,Qm,fbits is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | +:vcvt.f32.u32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { 
Qd = VectorUnsignedFixedToFloat(Qm,fbits); @@ -2124,7 +2123,7 @@ define pcodeop FloatSingleToBFloat16; # VCVT (between single-precision and BFloat16) # -:vcvt.bf16.f32 Dd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=1 & c0404=0 ) | +:vcvt.bf16.f32 Dd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=0 & thv_c0607=1 & thv_c0404=0 ) ) & Dd & Qm { @@ -2160,14 +2159,14 @@ define pcodeop VectorFloatHalfToSingle; ####### # VCVT (between half-precision and single-precision) # -:vcvt.f16.f32 Dd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=0 & c0404=0 ) | +:vcvt.f16.f32 Dd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=0 & thv_c0607=0 & thv_c0404=0 ) ) & Dd & Qm { Dd = VectorFloatSingleToHalf(Qm, 4:1, 16:1); } -:vcvt.f16.f32 Qd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=1 & c0607=0 & c0404=0 ) | +:vcvt.f16.f32 Qd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & Qd & Dm { @@ -2179,49 +2178,49 @@ define pcodeop VectorFloatToSignedRound; define pcodeop VectorFloatToUnsignedRound; # VCVTA/M/N/P Vector convert floating-point to integer with Rounding -:vcvt^roundType^".s16.f16" Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | +:vcvt^roundType^".s16.f16" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToSignedRound(Dm, 0:1, roundType); } -:vcvt^roundType^".u16.f16" Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=0 & c0404=0 ) | +:vcvt^roundType^".u16.f16" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=1 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToUnsignedRound(Dm, 0:1, roundType); } -:vcvt^roundType^".s32.f32" Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | +:vcvt^roundType^".s32.f32" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToSignedRound(Dm, 1:1, roundType); } -:vcvt^roundType^".u32.f32" Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=1 & Q6=0 & c0404=0 ) | +:vcvt^roundType^".u32.f32" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=1 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToUnsignedRound(Dm, 1:1, roundType); } -:vcvt^roundType^".s16.f16" Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | +:vcvt^roundType^".s16.f16" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 
& c1821=0xb & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToSignedRound(Qm, 0:1, roundType); } -:vcvt^roundType^".u16.f16" Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | +:vcvt^roundType^".u16.f16" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToUnsignedRound(Qm, 0:1, roundType); } -:vcvt^roundType^".s32.f32" Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | +:vcvt^roundType^".s32.f32" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToSignedRound(Qm, 1:1, roundType); } -:vcvt^roundType^".u32.f32" Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | +:vcvt^roundType^".u32.f32" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToUnsignedRound(Qm, 1:1, roundType); @@ -2238,37 +2237,37 @@ define pcodeop FloatToUnsignedRound; # VCVTA/M/N/P Float convert floating-point to integer with Rounding -:vcvt^roundType^".s32.f16" Sd,Sm is ( ( $(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=0 & c0404=0 ) | +:vcvt^roundType^".s32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Sm { local sm16:2 = Sm(0); Sd = FloatToSignedRound(sm16, roundType); } -:vcvt^roundType^".u32.f16" Sd,Sm is ( ( $(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=1 & c0404=0 ) | +:vcvt^roundType^".u32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=1 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Sm { local sm16:2 = Sm(0); Sd = FloatToUnsignedRound(sm16, roundType); } -:vcvt^roundType^".s32.f32" Sd,Sm is ( ( $(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=0 & c0404=0 ) | +:vcvt^roundType^".s32.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=0 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Sm { Sd = FloatToSignedRound(Sm, roundType); } -:vcvt^roundType^".u32.f32" Sd,Sm is ( ( $(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=1 & c0404=0 ) | +:vcvt^roundType^".u32.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=0 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Sm { Sd = FloatToUnsignedRound(Sm, roundType); } -:vcvt^roundType^".s32.f64" Sd,Dm is ( ( 
$(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=0 & c0404=0 ) | +:vcvt^roundType^".s32.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=5 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Dm { Sd = FloatToSignedRound(Dm, roundType); } -:vcvt^roundType^".u32.f64" Sd,Dm is ( ( $(AMODE) & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=1 & c0404=0 ) | +:vcvt^roundType^".u32.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=5 & thv_c0808=1 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Dm { Sd = FloatToUnsignedRound(Dm, roundType); @@ -2413,13 +2412,13 @@ define pcodeop VectorMultiplyAddLongScalar; # xfc000d00/mask=xffb00f10 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=111111000.00........1101...0.... -:vdot.bf16 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | +:vdot.bf16 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x18 & thv_c2021=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Dd { Dd = VectorDotProduct(Dn,Dm); } -:vdot.bf16 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | +:vdot.bf16 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x18 & thv_c2021=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & Qm & Qn & Qd { Qd = VectorDotProduct(Qn,Qm); @@ -2428,13 +2427,13 @@ define pcodeop VectorMultiplyAddLongScalar; Mindex: "["^M5^"]" is TMode=0 & M5 { local idx:1 = M5:1; export idx; } Mindex: "["^thv_M5^"]" is TMode=1 & thv_M5 { local idx:1 = thv_M5:1; export idx; } -:vdot.bf16 Dd, Dn, Dm0^Mindex is ( ( $(AMODE) & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | +:vdot.bf16 Dd, Dn, Dm0^Mindex is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=0 ) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorDotProduct(Dn,Dm0,Mindex); } -:vdot.bf16 Qd, Qn, Qm0^Mindex is ( ( $(AMODE) & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | +:vdot.bf16 Qd, Qn, Qm0^Mindex is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & Qm0 & Mindex & Qn & Qd { Qd = VectorDotProduct(Qn,Qm0,Mindex); @@ -2488,13 +2487,13 @@ vdupDm16: vdupDm is vdupDm export val; } -:vdup.^vdupSize Dd,vdupDm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=0 & c0404=0 ) | +:vdup.^vdupSize Dd,vdupDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=0 & c0404=0 ) | ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=0 & thv_c0404=0 ) ) & Dd & vdupDm { Dd = vdupDm; } -:vdup.^vdupSize Qd,vdupDm16 is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=1 & c0404=0 ) | +:vdup.^vdupSize Qd,vdupDm16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=1 & c0404=0 ) | ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=1 & thv_c0404=0) ) & Qd & vdupDm16 { Qd = vdupDm16; @@ -2555,14 
+2554,14 @@ vdupRd16: vdupRd8 is vdupRd8 Qn = vdupRd16; } -:veor Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | +:veor Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn ^ Dm; } -:veor Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | +:veor Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn ^ Qm; @@ -2571,7 +2570,7 @@ vdupRd16: vdupRd8 is vdupRd8 extImm: "#"^c0811 is TMode=0 & c0811 { tmp:1 = c0811; export tmp; } extImm: "#"^thv_c0811 is TMode=1 & thv_c0811 { tmp:1 = thv_c0811; export tmp; } -:vext.8 Dd,Dn,Dm,extImm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021=3 & c0606=0 & c0404=0 ) | +:vext.8 Dd,Dn,Dm,extImm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021=3 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dn & Dm & extImm { val:16 = (zext(Dm) << 64) | zext(Dn); @@ -2580,7 +2579,7 @@ extImm: "#"^thv_c0811 is TMode=1 & thv_c0811 { tmp:1 = thv_c0811; export tmp; } Dd = val:8; } -:vext.8 Qd,Qn,Qm,extImm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021=3 & c0606=1 & c0404=0 ) | +:vext.8 Qd,Qn,Qm,extImm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021=3 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qn & Qm & extImm { val:32 = (zext(Qm) << 128) | zext(Qn); @@ -2589,27 +2588,27 @@ extImm: "#"^thv_c0811 is TMode=1 & thv_c0811 { tmp:1 = thv_c0811; export tmp; } Qd = val:16; } -:vfma.f^fesize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=4 & c0811=0xc & c0606=0 & c0404=1 ) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=0xc & thv_c0606=0 & thv_c0404=1) ) & fesize2021 & Dd & Dn & Dm +:vfma.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=0xc & c0606=0 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=1) ) & fesize2020 & Dd & Dn & Dm { - Dd = vectorFusedMultiplyAccumulate(Dn, Dm, fesize2021); + Dd = vectorFusedMultiplyAccumulate(Dn, Dm, fesize2020); } -:vfma.f^fesize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=4 & c0811=0xc & c0606=1 & c0404=1 ) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=0xc & thv_c0606=1 & thv_c0404=1) ) & fesize2021 & Qd & Qn & Qm +:vfma.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=0xc & c0606=1 & c0404=1 ) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=1) ) & fesize2020 & Qd & Qn & Qm { - Qd = vectorFusedMultiplyAccumulate(Qn, Qm, fesize2021); + Qd = vectorFusedMultiplyAccumulate(Qn, Qm, fesize2020); } # Floating-point Multiply-Accumulate BFloat (vector) -:vfmab.BF16 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | +:vfmab.BF16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Qd & Qn & Qm { Qd = BfloatMultiplyAccumulate(Qn, Qm, 0:1); } -:vfmat.BF16 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | +:vfmat.BF16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & 
c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qn & Qm { Qd = BfloatMultiplyAccumulate(Qn, Qm, 1:1); @@ -2620,37 +2619,37 @@ vmfDm: thv_Dm_3^"["^index^"]" is TMode=1 & thv_Dm_3 & thv_M5 & thv_c0303 [ index vmfSm: Sm_3^"["^c0303^"]" is TMode=0 & c0404=1 & Sm_3 & M5 & c0303 { el:4 = VectorGetElement(Sm_3, M5:1, 4:1, 0:1); export el; } vmfSm: Sm_3^"["^c0303^"]" is TMode=1 & thv_c0404=1 & Sm_3 & thv_M5 & c0303 { el:4 = VectorGetElement(Sm_3, thv_M5:1, 4:1, 0:1); export el; } -:vfmab.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | +:vfmab.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Qd & Qn & vmfDm { Qd = BfloatMultiplyAccumulate(Qn, vmfDm, 0:1); } -:vfmat.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | +:vfmat.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qn & vmfDm { Qd = BfloatMultiplyAccumulate(Qn, vmfDm, 1:1); } -:vfmal.F16 Dd,Sn,Sm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=0 & c0404=1 ) | +:vfmal.F16 Dd,Sn,Sm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x2 & thv_c0811=0x8 & thv_c0606=0 & thv_c0404=1) ) & Dd & Sn & Sm { Dd = VectorMultiplyAddLongVector(Sn, Sm, 0:1); } -:vfmal.F16 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=1 & c0404=1 ) | +:vfmal.F16 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x2 & thv_c0811=0x8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Dn & Dm { Qd = VectorMultiplyAddLongVector(Dn, Dm, 1:1); } -:vfmal.F16 Dd,Sn,vmfSm is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=0 & c0404=1 ) | +:vfmal.F16 Dd,Sn,vmfSm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x2 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Dd & Sn & vmfSm { Dd = VectorMultiplyAddLongScalar(Sn, vmfSm, 0:1); } -:vfmal.F16 Qd,Dn,vmfDm is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=1 & c0404=1 ) | +:vfmal.F16 Qd,Dn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x2 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Dn & vmfDm { Qd = VectorMultiplyAddLongScalar(Dn, vmfDm, 1:1); @@ -2659,44 +2658,44 @@ vmfSm: Sm_3^"["^c0303^"]" is TMode=1 & thv_c0404=1 & Sm_3 & thv_M5 & c0303 { el -:vhadd.^udt^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=0 & c0404=0) | +:vhadd.^udt^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd { Dd = VectorHalvingAdd(Dn,Dm,esize2021,udt); } -:vhadd.^udt^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=1 & c0404=0) | 
+:vhadd.^udt^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorHalvingAdd(Qn,Qm,esize2021,udt); } -:vraddhn.i^esize2021x2 Dd,Qn,Qm is (($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=4 & Q6=0 & c0404=0) | +:vraddhn.i^esize2021x2 Dd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=4 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_Q6=0 & thv_c0404=0) ) & Qm & esize2021x2 & Qn & Dd { Dd = VectorRoundAddAndNarrow(Qn,Qm,esize2021x2); } -:vrhadd.^udt^esize2021 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=0 & c0404=0) | +:vrhadd.^udt^esize2021 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd { Dd = VectorRoundHalvingAdd(Dn,Dm,esize2021,udt); } -:vrhadd.^udt^esize2021 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=1 & c0404=0) | +:vrhadd.^udt^esize2021 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorRoundHalvingAdd(Qn,Qm,esize2021,udt); } -:vhsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=0 & c0404=0) | +:vhsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorHalvingSubtract(Dn,Dm,esize2021,udt); } -:vhsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=1 & c0404=0) | +:vhsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorHalvingSubtract(Qn,Qm,esize2021,udt); @@ -2708,73 +2707,73 @@ vmfSm: Sm_3^"["^c0303^"]" is TMode=1 & thv_c0404=1 & Sm_3 & thv_M5 & c0303 { el @if defined(VFPv4) -:vfma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | +:vfma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=1 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext(Sd:2 f+ (Sn:2 f* Sm:2)); } -:vfma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | +:vfma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=2 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = Sd f+ (Sn f* Sm); } -:vfma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=0 & c0404=0) | +:vfma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=0 & c0404=0) | 
($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=3 & thv_c0606=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = Dd f+ (Dn f* Dm); } -:vfms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=1 & c0404=0) | +:vfms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=1 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext(Sd:2 f+ ((f- Sn:2) f* Sm:2)); } -:vfms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=1 & c0404=0) | +:vfms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=2 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = Sd f+ ((f- Sn) f* Sm); } -:vfms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=1 & c0404=0 ) | +:vfms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=3 & thv_c0606=1 & thv_c0404=0)) & Dm & Dn & Dd { Dd = Dd f+ ((f- Dn) f* Dm); } -:vfnma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=1 & c0404=0 ) | +:vfnma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=1 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext((f- Sd:2) f+ ((f- Sn:2) f* Sm:2)); } -:vfnma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=1 & c0404=0 ) | +:vfnma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=2 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = (f- Sd) f+ ((f- Sn) f* Sm); } -:vfnma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=1 & c0404=0) | +:vfnma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=3 & thv_c0606=1 & thv_c0404=0)) & Dm & Dn & Dd { Dd = (f- Dd) f+ ((f- Dn) f* Dm); } -:vfnms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | +:vfnms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=1 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext((f- Sd:2) f+ (Sn:2 f* Sm:2)); } -:vfnms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | +:vfnms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=2 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = (f- Sd) f+ (Sn f* Sm); } -:vfnms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=0 & c0404=0 ) | +:vfnms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & 
c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=3 & thv_c0606=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = (f- Dd) f+ (Dn f* Dm); @@ -2822,14 +2821,14 @@ RnAligned45: "["^VRn^vldAlign45^"]" is TMode=0 & VRn & vldAlign45 { export VRn; RnAligned45: "["^VRn^vldAlign45^"]" is TMode=1 & VRn & vldAlign45 { export VRn; } -:vld1.^esize0607 vld1DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0003=15 & $(Vld1DdList)) | +:vld1.^esize0607 vld1DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0003=15 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=15 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; build vld1DdList; } -:vld1.^esize0607 vld1DdList,RnAligned45^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0003=13 & $(Vld1DdList)) | +:vld1.^esize0607 vld1DdList,RnAligned45^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0003=13 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=13 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; @@ -2837,7 +2836,7 @@ RnAligned45: "["^VRn^vldAlign45^"]" is TMode=1 & VRn & vldAlign45 { export VRn; RnAligned45 = RnAligned45 + (8 * vld1DdList); } -:vld1.^esize0607 vld1DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & $(Vld1DdList)) | +:vld1.^esize0607 vld1DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & $(thv_Vld1DdList)) ) & VRm & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; @@ -2882,14 +2881,14 @@ vld1Align2: "@32" is TMode=1 & thv_c1011=2 & thv_c0404=1 { } RnAligned2: "["^VRn^vld1Align2^"]" is VRn & vld1Align2 { export VRn; } -:vld1.^esize1011 vld1DdElement2,RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=15 & $(Vld1DdElement2) ) | +:vld1.^esize1011 vld1DdElement2,RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=15 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=15 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; build vld1DdElement2; } -:vld1.^esize1011 vld1DdElement2,RnAligned2^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=13 & $(Vld1DdElement2) ) | +:vld1.^esize1011 vld1DdElement2,RnAligned2^"!" 
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=13 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=13 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; @@ -2897,7 +2896,7 @@ RnAligned2: "["^VRn^vld1Align2^"]" is VRn & vld1Align2 { export VRn; } RnAligned2 = RnAligned2 + esize1011; } -:vld1.^esize1011 vld1DdElement2,RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0809=0 & $(Vld1DdElement2) ) | +:vld1.^esize1011 vld1DdElement2,RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & $(T_Vld1DdElement2) ) ) & VRm & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; @@ -2953,20 +2952,20 @@ RnAligned3: "["^Rn^vld1Align3^"]" is Rn & vld1Align3 { export Rn; } @define vld1Constrain "((c0607=0 & c0404=0) | c0607=1 | c0607=2)" -:vld1.^esize0607 vld1DdList3,RnAligned3 is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=15 & $(vld1Constrain) +:vld1.^esize0607 vld1DdList3,RnAligned3 is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=15 & $(vld1Constrain) { mult_dat8 = vld1RnReplicate; build vld1DdList3; } -:vld1.^esize0607 vld1DdList3,RnAligned3^"!" is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=13 & $(vld1Constrain) +:vld1.^esize0607 vld1DdList3,RnAligned3^"!" is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & c0003=13 & $(vld1Constrain) { mult_dat8 = vld1RnReplicate; build vld1DdList3; RnAligned3 = RnAligned3 + vld1DdList3; } -:vld1.^esize0607 vld1DdList3,RnAligned3,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & VRm & $(vld1Constrain) +:vld1.^esize0607 vld1DdList3,RnAligned3,VRm is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & RnAligned3 & vld1RnReplicate & vld1DdList3 & c0811=12 & esize0607 & VRm & $(vld1Constrain) { mult_dat8 = vld1RnReplicate; build vld1DdList3; @@ -3112,14 +3111,14 @@ vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=3 & @define Vld2DdList "(c0811=3 | c0811=8 | c0811=9)" @define thv_Vld2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)" -:vld2.^esize0607 vld2DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=15 & $(Vld2DdList) ) | +:vld2.^esize0607 vld2DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=15 & $(Vld2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=15 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList { mult_addr = RnAligned45; build vld2DdList; } -:vld2.^esize0607 vld2DdList,RnAligned45^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=13 & $(Vld2DdList) ) | +:vld2.^esize0607 vld2DdList,RnAligned45^"!" 
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=13 & $(Vld2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=13 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList { mult_addr = RnAligned45; @@ -3127,7 +3126,7 @@ vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=3 & RnAligned45 = RnAligned45 + (8 * vld2DdList); } -:vld2.^esize0607 vld2DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003 & $(Vld2DdList) ) | +:vld2.^esize0607 vld2DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003 & $(Vld2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003 & $(thv_Vld2DdList) ) ) & VRm & RnAligned45 & esize0607 & vld2DdList { mult_addr = RnAligned45; @@ -3167,15 +3166,15 @@ vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVl vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double -:vld2.^esize1011 vld2DdList2,vld2RnAligned2 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=15 ) | +:vld2.^esize1011 vld2DdList2,vld2RnAligned2 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=15 ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 unimpl -:vld2.^esize1011 vld2DdList2,vld2RnAligned2^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=13 ) | +:vld2.^esize1011 vld2DdList2,vld2RnAligned2^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=13 ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 unimpl -:vld2.^esize1011 vld2DdList2,vld2RnAligned2,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003 ) | +:vld2.^esize1011 vld2DdList2,vld2RnAligned2,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003 ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2 unimpl @@ -3203,15 +3202,15 @@ vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & build vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld2DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single vld2DdList3: "{"^buildVld2DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld2DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double -:vld2.^esize0607 vld2DdList3,vld2RnAligned3 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=15 ) | +:vld2.^esize0607 vld2DdList3,vld2RnAligned3 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=15 ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 unimpl -:vld2.^esize0607 vld2DdList3,vld2RnAligned3^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=13 ) | +:vld2.^esize0607 vld2DdList3,vld2RnAligned3^"!" 
is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=13 ) |
+:vld2.^esize0607 vld2DdList3,vld2RnAligned3^"!"
is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=13 ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 unimpl -:vld2.^esize0607 vld2DdList3,vld2RnAligned3,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003) | +:vld2.^esize0607 vld2DdList3,vld2RnAligned3,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3 unimpl @@ -3236,13 +3235,13 @@ vld3DdList: "{"^buildVld3DdList^"}" is TMode=0 & c0811=5 & D22 & c1215 & buildVl vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double -:vld3.^esize0607 vld3DdList,vld3RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=15 ) | +:vld3.^esize0607 vld3DdList,vld3RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=15) ) & vld3RnAligned & esize0607 & vld3DdList unimpl -:vld3.^esize0607 vld3DdList,vld3RnAligned^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=13 ) | +:vld3.^esize0607 vld3DdList,vld3RnAligned^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=13) ) & vld3RnAligned & esize0607 & vld3DdList unimpl -:vld3.^esize0607 vld3DdList,vld3RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 ) | +:vld3.^esize0607 vld3DdList,vld3RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 ) ) & VRm & vld3RnAligned & esize0607 & vld3DdList unimpl ####### @@ -3268,13 +3267,13 @@ vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVl vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0405=2) | (thv_c1011=2 & thv_c0406=4)) & thv_D22 & thv_c1215 & buildVld3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double -:vld3.^esize1011 vld3DdList2,vld3Rn is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=15) | +:vld3.^esize1011 vld3DdList2,vld3Rn is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=15) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15) ) & vld3Rn & esize1011 & vld3DdList2 unimpl -:vld3.^esize1011 vld3DdList2,vld3Rn^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=13) | +:vld3.^esize1011 vld3DdList2,vld3Rn^"!" 
is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=13) |
+:vld3.^esize1011 vld3DdList2,vld3Rn^"!"
is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=13) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13) ) & vld3Rn & esize1011 & vld3DdList2 unimpl -:vld3.^esize1011 vld3DdList2,vld3Rn,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2) | +:vld3.^esize1011 vld3DdList2,vld3Rn,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2) ) & VRm & vld3Rn & esize1011 & vld3DdList2 unimpl ####### @@ -3290,13 +3289,13 @@ vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & build vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld3DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single vld3DdList3: "{"^buildVld3DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld3DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double -:vld3.^esize0607 vld3DdList3,vld3Rn is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=15) | +:vld3.^esize0607 vld3DdList3,vld3Rn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=15) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=15) ) & vld3Rn & esize0607 & vld3DdList3 unimpl -:vld3.^esize0607 vld3DdList3,vld3Rn^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=13) | +:vld3.^esize0607 vld3DdList3,vld3Rn^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=13) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=13) ) & vld3Rn & esize0607 & vld3DdList3 unimpl -:vld3.^esize0607 vld3DdList3,vld3Rn,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0) | +:vld3.^esize0607 vld3DdList3,vld3Rn,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0) ) & VRm & vld3Rn & esize0607 & vld3DdList3 unimpl @@ -3332,15 +3331,15 @@ vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVl vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double -:vld4.^esize1011 vld4DdList2,vld4RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=15) | +:vld4.^esize1011 vld4DdList2,vld4RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=15) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2 unimpl -:vld4.^esize1011 vld4DdList2,vld4RnAligned2^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=13) | +:vld4.^esize1011 vld4DdList2,vld4RnAligned2^"!" 
is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=13) |
+:vld4.^esize1011 vld4DdList2,vld4RnAligned2^"!"
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=13) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2 unimpl -:vld4.^esize1011 vld4DdList2,vld4RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003) | +:vld4.^esize1011 vld4DdList2,vld4RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003 ) ) & esize1011 & VRm & vld4RnAligned2 & vld4DdList2 unimpl @@ -3362,12 +3361,12 @@ buildVld4DdList3: Dreg^"[]",buildVld4DdList3 is Dreg & buildVld4DdList3 [ count vld4DdList3: "{"^buildVld4DdList3^"}" is c0505=0 & D22 & c1215 & buildVld4DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single vld4DdList3: "{"^buildVld4DdList3^"}" is c0505=1 & D22 & c1215 & buildVld4DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double -:vld4.^esize0607 vld4DdList3,vld4RnAligned3 is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=15 & vld4DdList3 unimpl +:vld4.^esize0607 vld4DdList3,vld4RnAligned3 is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=15 & vld4DdList3 unimpl #thv_2327=0x12 -:vld4.^esize0607 vld4DdList3,vld4RnAligned3^"!" is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=13 & vld4DdList3 unimpl +:vld4.^esize0607 vld4DdList3,vld4RnAligned3^"!" is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & c0003=13 & vld4DdList3 unimpl -:vld4.^esize0607 vld4DdList3,vld4RnAligned3,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & VRm & vld4DdList3 unimpl +:vld4.^esize0607 vld4DdList3,vld4RnAligned3,VRm is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & vld4RnAligned3 & c0811=15 & esize0607 & VRm & vld4DdList3 unimpl ####### # VLD4 (multiple 4-element structures) @@ -3393,15 +3392,15 @@ vld4DdList: "{"^buildVld4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVl vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double -:vld4.^esize0607 vld4DdList,vld4RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=15 ) | +:vld4.^esize0607 vld4DdList,vld4RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=15 ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList unimpl -:vld4.^esize0607 vld4DdList,vld4RnAligned^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=13 ) | +:vld4.^esize0607 vld4DdList,vld4RnAligned^"!" 
is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=13 ) |
+:vld4.^esize0607 vld4DdList,vld4RnAligned^"!"
is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=13 ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList unimpl -:vld4.^esize0607 vld4DdList,vld4RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3) | +:vld4.^esize0607 vld4DdList,vld4RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList unimpl @@ -3724,12 +3723,16 @@ vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=0 { ptr:4 = Rn; expor vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=1 { ptr:4 = Rn; export ptr; } vldrRn: "["^Rn^",#-"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = Rn - vldrImm; export ptr; } vldrRn: "["^Rn^",#"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = Rn + vldrImm; export ptr; } +vldrRn: "["^pc^"]" is TMode=0 & Rn=15 & pc & immed=0 & c2323=0 { ptr:4 = ((inst_start + 8) & 0xfffffffc); export ptr; } +vldrRn: "["^pc^"]" is TMode=0 & Rn=15 & pc & immed=0 & c2323=1 { ptr:4 = ((inst_start + 8) & 0xfffffffc); export ptr; } vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) - vldrImm; export ptr; } vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) + vldrImm; export ptr; } vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=0 { ptr:4 = VRn; export ptr; } vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=1 { ptr:4 = VRn; export ptr; } vldrRn: "["^VRn^",#-"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn - vldrImm; export ptr; } vldrRn: "["^VRn^",#"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn + vldrImm; export ptr; } +vldrRn: "["^pc^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed=0 & thv_c2323=0 { ptr:4 = ((inst_start + 4) & 0xfffffffc); export ptr; } +vldrRn: "["^pc^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed=0 & thv_c2323=1 { ptr:4 = ((inst_start + 4) & 0xfffffffc); export ptr; } vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start + 4) & 0xfffffffc) - vldrImm; export ptr; } vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start + 4) & 0xfffffffc) + vldrImm; export ptr; } @@ -3762,114 +3765,114 @@ define pcodeop FloatVectorMultiplySubtract; @if defined(SIMD) -:vmax.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=0 ) | +:vmax.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=0 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & udt & Dm & Dn & Dd { Dd = VectorMax(Dn,Dm,esize2021,udt); } -:vmax.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=0 ) | +:vmax.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=0 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & 
thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & udt & Qm & Qn & Qd { Qd = VectorMax(Qn,Qm,esize2021,udt); } -:vmax.f32 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=0 & c0404=0) | +:vmax.f32 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = FloatVectorMax(Dn,Dm,2:4,32:1); } -:vmax.f32 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=1 & c0404=0) | +:vmax.f32 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd { Qd = FloatVectorMax(Qn,Qm,2:4,32:1); } -:vmin.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=1 ) | +:vmin.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=1 ) ) & esize2021 & udt & Dm & Dn & Dd { Dd = VectorMin(Dn,Dm,esize2021,udt); } -:vmin.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=1 ) | +:vmin.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=1 ) ) & esize2021 & udt & Qm & Qn & Qd { Qd = VectorMin(Qn,Qm,esize2021,udt); } -:vmin.f32 Dd,Dn,Dm is (($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=0 & c0404=0) | +:vmin.f32 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = FloatVectorMin(Dn,Dm,2:4,32:1); } -:vmin.f32 Qd,Qn,Qm is (($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=1 & c0404=0) | +:vmin.f32 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd { Qd = FloatVectorMin(Qn,Qm,2:4,32:1); } -:vmla.i^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=0 & c0404=0 ) | +:vmla.i^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd { Dd = VectorMultiplyAccumulate(Dn,Dm,esize2021,0:1); } -:vmla.i^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | +:vmla.i^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = VectorMultiplyAccumulate(Qn,Qm,esize2021,0:1); } -:vmls.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=0 & c0404=0) | +:vmls.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd { Dd = VectorMultiplySubtract(Dn,Dm,esize2021,0:1); } 
-:vmls.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | +:vmls.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = VectorMultiplySubtract(Qn,Qm,esize2021,0:1); } -:vmlal.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=8 & Q6=0 & c0404=0) | +:vmlal.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorMultiplyAccumulate(Dn,Dm,esize2021,udt); } -:vmlsl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | +:vmlsl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorMultiplySubtractLong(Dn,Dm,esize2021,udt); } -:vmla.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2021 & Dn & Dd & Dm +:vmla.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2020 & Dn & Dd & Dm { - Dd = FloatVectorMultiplyAccumulate(Dn,Dm,fesize2021,8:1); + Dd = FloatVectorMultiplyAccumulate(Dn,Dm,fesize2020,8:1); } -:vmla.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2021 & Qn & Qd & Qm +:vmla.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2020 & Qn & Qd & Qm { - Qd = FloatVectorMultiplyAccumulate(Qn,Qm,fesize2021,16:1); + Qd = FloatVectorMultiplyAccumulate(Qn,Qm,fesize2020,16:1); } -:vmls.f^fesize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2021 & Dn & Dd & Dm +:vmls.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2020 & Dn & Dd & Dm { - Dd = FloatVectorMultiplySubtract(Dn,Dm,fesize2021,8:1); + Dd = FloatVectorMultiplySubtract(Dn,Dm,fesize2020,8:1); } -:vmls.f^fesize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2021 & Qn & Qd & Qm +:vmls.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=1) | + ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2020 & Qn & Qd & Qm { - Qd = FloatVectorMultiplySubtract(Qn,Qm,fesize2021,16:1); + Qd = 
FloatVectorMultiplySubtract(Qn,Qm,fesize2021,16:1);
+ Qd =
FloatVectorMultiplySubtract(Qn,Qm,fesize2020,16:1); } @endif # SIMD @@ -3914,49 +3917,49 @@ vmlDm: thv_Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & thv_Dm_3 & thv_M5 & thv vmlDm: thv_Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & thv_Dm_4 & thv_M5 { el:4 = VectorGetElement(thv_Dm_4, thv_M5:1, 4:1, 0:1); export el; } -:vmla.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | +:vmla.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm { Dd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021); } -:vmla.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | +:vmla.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm { Qd = VectorMultiplyAccumulate(Qn,vmlDm,esize2021); } -:vmla.f32 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | +:vmla.f32 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm { Dd = FloatVectorMultiplyAccumulate(Dn,vmlDm,2:4,32:1); } -:vmla.f32 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | +:vmla.f32 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm { Qd = FloatVectorMultiplyAccumulate(Qn,vmlDm,2:4,32:1); } -:vmls.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=4 & c0606=1 & c0404=0) | +:vmls.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=4 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm { Dd = VectorMultiplySubtract(Dn,vmlDm,esize2021); } -:vmls.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2)& c0811=4 & c0606=1 & c0404=0) | +:vmls.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2)& c0811=4 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm { Qd = VectorMultiplySubtract(Qn,vmlDm,esize2021); } -:vmls.f32 Dd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | +:vmls.f32 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm { Dd = FloatVectorMultiplySubtract(Dn,vmlDm,2:4,32:1); } -:vmls.f32 Qd,Qn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & 
c2424=1 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | +:vmls.f32 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm { Qd = FloatVectorMultiplySubtract(Qn,vmlDm,2:4,32:1); @@ -3968,25 +3971,25 @@ vmlDm: thv_Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & thv_Dm_4 & thv_M5 -:vmlal.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=2 & Q6=1 & c0404=0) | +:vmlal.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=2 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm { Qd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021,udt); } -:vmlsl.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=6 & Q6=1 & c0404=0) | +:vmlsl.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=6 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm { Qd = VectorMultiplySubtract(Dn,vmlDm,esize2021,udt); } -:vmov.^simdExpImmDT Dd,simdExpImm_8 is (( $(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=0 & c0404=1 ) | +:vmov.^simdExpImmDT Dd,simdExpImm_8 is (( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=0 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0707=0 & thv_Q6=0 & thv_c0404=1 )) & Dd & simdExpImmDT & simdExpImm_8 { Dd = simdExpImm_8; } -:vmov.^simdExpImmDT Qd,simdExpImm_16 is (( $(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=1 & c0404=1 ) | +:vmov.^simdExpImmDT Qd,simdExpImm_16 is (( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=1 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0707=0 & thv_Q6=1 & thv_c0404=1 )) & Qd & simdExpImmDT & simdExpImm_16 { Qd = simdExpImm_16; @@ -4014,13 +4017,13 @@ vmlDm: thv_Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & thv_Dm_4 & thv_M5 @if defined(SIMD) -:vmov Dd,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=0 & c0404=1 ) | +:vmov Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=0 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=0 & thv_c0404=1) ) & Dd & Dm { Dd = Dm; } -:vmov Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=1 & c0404=1 ) | +:vmov Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=1 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qm { Qd = Qm; @@ -4201,20 +4204,20 @@ define pcodeop VectorCopyNarrow; @if defined(SIMD) -:vmovl.^udt^esize2021 Qd,Dm is (($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c1921=1 | c1921=2 | c1921=4) & c1618=0 & c0611=0x28 & c0404=1) | +:vmovl.^udt^esize2021 Qd,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1921=1 | c1921=2 | c1921=4) & c1618=0 & c0611=0x28 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1921=1 | 
thv_c1921=2 | thv_c1921=4) & thv_c1618=0 & thv_c0611=0x28 & thv_c0404=1) ) & esize2021 & udt & Qd & Dm { Qd = VectorCopyLong(Dm,esize2021,udt); } -:vmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=8 & c0404=0) | +:vmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=8 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=8 & thv_c0404=0) ) & esize1819x2 & Dd & Qm { Dd = VectorCopyNarrow(Qm,esize1819x2); } -:vmovx.F16 Sd,Sm is (($(AMODE) & cond=15 & c2327=0x1d & c1921=0x6 & c1618=0 & c0611=0x29 & c0404=0) | - ($(TMODE_EorF) & thv_c2327=0x1d & thv_c1921=0x6 & thv_c1618=0 & thv_c0611=0x29 & thv_c0404=0) ) & Sd & Sm +:vmovx.F16 Sd,Sm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1921=0x6 & c1618=0 & c0611=0x29 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1d & thv_c1921=0x6 & thv_c1618=0 & thv_c0611=0x29 & thv_c0404=0) ) & Sd & Sm { local SmUpper:2 = Sm(2); Sd = zext(SmUpper); @@ -4261,25 +4264,25 @@ define pcodeop FloatVectorMult; define pcodeop VectorMultiply; define pcodeop PolynomialMultiply; -:vmul.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=0 & c0404=1) | +:vmul.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = FloatVectorMult(Dn,Dm,2:1,32:1); } -:vmul.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=1 & c0404=1) | +:vmul.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = FloatVectorMult(Qn,Qm,2:1,32:1); } -:vmul.f16 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=0 & c0404=1) | +:vmul.f16 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = FloatVectorMult(Dn,Dm,4:1,16:1); } -:vmul.f16 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=1 & c0404=1) | +:vmul.f16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = FloatVectorMult(Qn,Qm,4:1,16:1); @@ -4308,43 +4311,43 @@ define pcodeop PolynomialMultiply; # VMUL (Integer and polynomial) # -:vmul.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=9 & Q6=0 & c0404=1) | +:vmul.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=9 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm { Dd = VectorMultiply(Dn,Dm,esize2021); } -:vmul.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=9 & Q6=1 & c0404=1) | +:vmul.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=9 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=1 & thv_c0404=1)) & esize2021 & Qm & Qn & Qd { Qd = VectorMultiply(Qn,Qm,esize2021); } -:vmul.p8 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=9 & 
Q6=0 & c0404=1) | +:vmul.p8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=9 & thv_Q6=0 & thv_c0404=1) ) & Dn & Dd & Dm { Dd = PolynomialMultiply(Dn,Dm,1:1); } -:vmul.p8 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=1 & c0404=1) | +:vmul.p8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=9 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = PolynomialMultiply(Qn,Qm,1:1); } -:vmull.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0xc & Q6=0 & c0404=0) | +:vmull.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0xc & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0xc & thv_Q6=0 & thv_c0404=0) ) & esize2021 & Dm & Dn & Qd & udt { Qd = VectorMultiply(Dn,Dm,esize2021,udt); } -:vmull.p8 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x5 & c2021=0 & c0811=0xe & Q6=0 & c0404=0) | +:vmull.p8 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & c2021=0 & c0811=0xe & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=0 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd { Qd = PolynomialMultiply(Dn,Dm,1:1); } -:vmull.p64 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x5 & c2021=2 & c0811=0xe & Q6=0 & c0404=0) | +:vmull.p64 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & c2021=2 & c0811=0xe & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd { Qd = PolynomialMultiply(Dn,Dm,8:1); @@ -4366,19 +4369,19 @@ vmlDmA: Dm_4^"["^M5^"]" is TMode=0 & c2021=2 & Dm_4 & M5 { el:4 = V vmlDmA: Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & Dm_3 & thv_M5 & c0303 [ index = (thv_M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } vmlDmA: Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { el:4 = VectorGetElement(Dm_4, thv_M5:1, 4:1, 0:1); export el; } -:vmul.^etype^esize2021 Qd,Qn,vmlDmA is ( ($(AMODE) & cond=15 & c2327=0x07 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | +:vmul.^etype^esize2021 Qd,Qn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x07 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Qn & Qd & vmlDmA { Qd = VectorMultiply(Qn,vmlDmA,esize2021); } -:vmul.^etype^esize2021 Dd,Dn,vmlDmA is ( ($(AMODE) & cond=15 & c2327=0x5 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | +:vmul.^etype^esize2021 Dd,Dn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Dn & Dd & vmlDmA { Dd = VectorMultiply(Dn,vmlDmA,esize2021); } -:vmull.^etype^esize2021 Qd,Dn,vmlDmA is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=10 & c0606=1 & c0404=0) | +:vmull.^etype^esize2021 Qd,Dn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=10 & thv_c0606=1 & thv_c0404=0 ) ) & Dd & Dm & esize1819 & etype & esize2021 & Dn & Qd & vmlDmA { Qd = 
VectorMultiply(Dn,vmlDmA,esize2021); @@ -4388,38 +4391,38 @@ vmlDmA: Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { # VMVN (immediate) # -:vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=3 ) | +:vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=0 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } -:vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=7 ) | +:vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=0 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } -:vmvn.i16 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=3 ) | +:vmvn.i16 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } -:vmvn.i16 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=7 ) | +:vmvn.i16 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } -:vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0407=3 ) | - ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0911=6 & thv_c0407=3) ) & Dd & simdExpImm_8 +:vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0808=0 & c0407=3 ) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0911=6 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } -:vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0808=0 & c0407=7 ) | - ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0911=6 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 +:vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0808=0 & c0407=7 ) | + ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0911=6 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } @@ -4428,13 +4431,13 @@ vmlDmA: Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { # VMVN (register) # -:vmvn Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=0 & c0404=0 ) | +:vmvn Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & Dd & Dm { Dd = ~Dm; } -:vmvn Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=1 & c0404=0 ) | +:vmvn Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & Qd & Qm { tmp1:8 = Qm:8; @@ -4447,25 +4450,25 @@ vmlDmA: Dm_4^"["^thv_M5^"]" is 
TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { define pcodeop FloatVectorNeg; -:vneg.s^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=0 & c0404=0 ) | +:vneg.s^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = FloatVectorNeg(Dm,1:1,esize1819); } -:vneg.s^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=1 & c0404=0 ) | +:vneg.s^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = FloatVectorNeg(Qm,1:1,esize1819); } -:vneg.f^fesize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=0xf & Q6=0 & c0404=0 ) | +:vneg.f^fesize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=0xf & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=0 & thv_c0404=0 ) ) & fesize1819 & Dm & Dd { Dd = FloatVectorNeg(Dm,2:1,fesize1819); } -:vneg.f^fesize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xf & Q6=1 & c0404=0 ) | +:vneg.f^fesize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xf & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=1 & thv_c0404=0 ) ) & fesize1819 & Qd & Qm { Qd = FloatVectorNeg(Qm,2:1,fesize1819); @@ -4479,6 +4482,7 @@ define pcodeop FloatVectorNeg; :vnmla^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & COND & Dm & Dn & Dd { + build COND; product:8 = Dn f* Dm; Dd = (f- Dd) f+ (f- product); } @@ -4486,13 +4490,15 @@ define pcodeop FloatVectorNeg; :vnmla^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { + build COND; product:4 = Sn f* Sm; Sd = (f- Sd) f+ (f- product); } -:vnmla.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=1 & c0811=9 & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd +:vnmla^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=9 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { + build COND; product:2 = Sn:2 f* Sm:2; product = (f- Sd:2) f+ (f- product); Sd = zext(product); @@ -4501,6 +4507,7 @@ define pcodeop FloatVectorNeg; :vnmls^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & COND & Dm & Dn & Dd { + build COND; product:8 = Dn f* Dm; Dd = product f- Dd; } @@ -4508,13 +4515,15 @@ define pcodeop FloatVectorNeg; :vnmls^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=0 & c0404=0) | ($(TMODE_E) & 
thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & Sd { + build COND; product:4 = Sn f* Sm; Sd = product f- Sd; } -:vnmls.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=1 & c0811=9 & c0606=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & Sm & Sn & Sd +:vnmls^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=9 & c0606=0 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & Sd { + build COND; product:2 = Sn:2 f* Sm:2; product = product f- Sd:2; Sd = zext(product); @@ -4523,6 +4532,7 @@ define pcodeop FloatVectorNeg; :vnmul^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=11 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & COND & Dm & Dn & Dd { + build COND; product:8 = Dn f* Dm; Dd = f- product; } @@ -4534,9 +4544,10 @@ define pcodeop FloatVectorNeg; Sd = f- product; } -:vnmul.f16 Sd,Sn,Sm is ( ($(AMODE) & cond=0xe & c2327=0x1c & c2021=2 & c0811=9 & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sn & Sd +:vnmul^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=9 & c0606=1 & c0404=0) | + ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { + build COND; product:2 = Sn:2 f* Sm:2; product = f- product; Sd = zext(product); @@ -4575,7 +4586,7 @@ define pcodeop FloatVectorNeg; @if defined(SIMD) #F6.1.141 VORR (register) 64-bit SIMD vector variant (A1 and T1) -:vorr Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=0 & c0404=1) | +:vorr Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { @@ -4583,28 +4594,28 @@ define pcodeop FloatVectorNeg; } #F6.1.141 VORR (register) 128-bit SIMD vector variant (A1 and T1) -:vorr Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=1 & c0404=1) | +:vorr Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn | Qm; } #F6.1.140 VORR and F6.1.138 VORN (immediate) 64-bit SIMD vector variant -:vorr Dd,simdExpImm_8 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=1 ) | +:vorr Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=1 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=1) ) & Dd & simdExpImm_8 { Dd = Dd | simdExpImm_8; } #F6.1.140 VORR and F6.1.138 VORN (immediate) 128-bit SIMD vector variant -:vorr Qd,simdExpImm_16 is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=5 ) | +:vorr Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=5 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=5) ) & Qd & simdExpImm_16 { Qd = Qd | simdExpImm_16; } #F6.1.139 VORN (register) 64-bit SIMD vector variant (A1 and T1) -:vorn Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=0 & c0404=1) | +:vorn Dd,Dn,Dm is ( ($(AMODE) & 
cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=0 & c0404=1) |
+:vorn Dd,Dn,Dm is ( ($(AMODE) &
ARMcond=0 & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { @@ -4612,7 +4623,7 @@ define pcodeop FloatVectorNeg; } #F6.1.139 VORN (register) 128-bit SIMD vector variant (A1 and T1) -:vorn Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=1 & c0404=1) | +:vorn Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn | ~Qm; @@ -4691,49 +4702,49 @@ vpopSd64List: "{"^buildVpopSd64List^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c define pcodeop SatQ; define pcodeop SignedSatQ; -:vqabs^".s"^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c0811=0 & Q6=0 & c0404=1) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm +:vqabs^".s"^esize1819 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=7 & Q6=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=7 & thv_Q6=0 & thv_c0404=0)) & esize1819 & Dn & Dd & Dm { - Dd = VectorAbs(Dn,Dm,esize2021); - Dd = SatQ(Dd, esize2021, 0:1); + Dd = VectorAbs(Dn,Dm,esize1819); + Dd = SatQ(Dd, esize1819, 0:1); } -:vqabs^".s"^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c0811=0 & Q6=1 & c0404=1) | - ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd +:vqabs^".s"^esize1819 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=7 & Q6=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=7 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qn & Qd { - Qd = VectorAbs(Qn,Qm,esize2021); - Qd = SatQ(Qd, esize2021, 0:1); + Qd = VectorAbs(Qn,Qm,esize1819); + Qd = SatQ(Qd, esize1819, 0:1); } -:vqadd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=0 & c0404=1) | +:vqadd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorAdd(Dn,Dm,esize2021,udt); Dd = SatQ(Dd, esize2021, udt); } -:vqadd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=1 & c0404=1) | +:vqadd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd { Qd = VectorAdd(Qn,Qm,esize2021,udt); Qd = SatQ(Qd, esize2021, udt); } -:vqmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=5 & c0606 & c0404=0) | +:vqmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=5 & c0606 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=5 & thv_c0404=0) ) & esize1819x2 & Dd & Qm { Dd = VectorCopyNarrow(Qm,esize1819x2,c0606:1); Dd = SatQ(Dd, esize1819x2,0:1); } -:vqmovun.i^esize1819x2 Dd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=9 & c0404=0) | +:vqmovun.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=9 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=9 
& thv_c0404=0) ) & esize1819x2 & Dd & Qm { Dd = VectorCopyNarrow(Qm,esize1819x2,0:1); Dd = SatQ(Dd, esize1819x2,0:1); } -:vqdmlal.S^esize2021 Qd,Dn,Dm is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x9 & c0606=0 & c0404=0 ) | +:vqdmlal.S^esize2021 Qd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x9 & c0606=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x9 & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd { @@ -4741,7 +4752,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmlal.S^esize2021 Qd,Dn,vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x3 & c0606=1 & c0404=0) | +:vqdmlal.S^esize2021 Qd,Dn,vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x3 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x3 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd { @@ -4749,7 +4760,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmlsl.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0xb & c0606=0 & c0404=0 ) | +:vqdmlsl.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0xb & c0606=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd { @@ -4757,7 +4768,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmlsl.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & (c2021=1 | c2021=2)& c0811=0x7 & c0606=1 & c0404=0) | +:vqdmlsl.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2)& c0811=0x7 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x7 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd { @@ -4765,7 +4776,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | +:vqdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd { @@ -4773,7 +4784,7 @@ define pcodeop SignedSatQ; Dd = SatQ(Dd, esize2021,0:1); } -:vqdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | +:vqdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd { @@ -4781,7 +4792,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xc & c0606=1 & c0404=0) | +:vqdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xc & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd { @@ -4789,7 +4800,7 @@ define pcodeop 
SignedSatQ; Dd = SatQ(Dd, esize2021,0:1); } -:vqdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xc & c0606=1 & c0404=0) | +:vqdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xc & c0606=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd { @@ -4797,7 +4808,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmull.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=0xD & Q6=0 & c0404=0 ) | +:vqdmull.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=0xD & Q6=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & thv_c2324=1 & thv_c2021<3 & thv_c0811=0xD & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd { @@ -4805,15 +4816,15 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqdmull.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2327=5 & c2021<3 & c0811=0xb & Q6=1 & c0404=1 ) | - ( $(TMODE_E) & thv_c2327=0x1e & thv_c2324=1 & thv_c2021<3 & thv_c0811=0xb & thv_Q6=1 & thv_c0404=1 ) ) & esize2021 & vmlDmA & Dn & Qd +:vqdmull.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=0xb & Q6=1 & c0404=0 ) | + ( $(TMODE_E) & thv_c2327=0x1e & thv_c2324=1 & thv_c2021<3 & thv_c0811=0xb & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd { Qd = VectorDoubleMultiplyLong(Dn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); } -:vqrdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | +:vqrdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd { @@ -4821,7 +4832,7 @@ define pcodeop SignedSatQ; Dd = SatQ(Dd, esize2021,0:1); } -:vqrdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | +:vqrdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd { @@ -4829,7 +4840,7 @@ define pcodeop SignedSatQ; Qd = SatQ(Qd, esize2021,0:1); } -:vqrdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xd & Q6=1 & c0404=0) | +:vqrdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xd & Q6=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd { @@ -4837,7 +4848,7 @@ define pcodeop SignedSatQ; Dd = SatQ(Dd, esize2021,0:1); } -:vqrdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xd & Q6=1 & c0404=0) | +:vqrdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xd & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & 
thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd { @@ -4846,14 +4857,14 @@ define pcodeop SignedSatQ; } -:vqsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=0 & c0404=1) | +:vqsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorSub(Dn,Dm,esize2021,udt); Dd = SatQ(Dd, esize2021, udt); } -:vqsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=1 & c0404=1) | +:vqsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd { Qd = VectorSub(Qn,Qm,esize2021,udt); @@ -4864,13 +4875,13 @@ define pcodeop SignedSatQ; # VRECPE define pcodeop VectorReciprocalEstimate; -:vrecpe.^fdt^32 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=1 & c0404=0) | +:vrecpe.^fdt^32 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd { Qd = VectorReciprocalEstimate(Qm,fdt); } -:vrecpe.^fdt^32 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=0 & c0404=0) | +:vrecpe.^fdt^32 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd { Dd = VectorReciprocalEstimate(Dm,fdt); @@ -4880,13 +4891,13 @@ define pcodeop VectorReciprocalEstimate; # VRECPS define pcodeop VectorReciprocalStep; -:vrecps.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=1 & c0404=1) | +:vrecps.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd { Qd = VectorReciprocalStep(Qn,Qm); } -:vrecps.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=0 & c0404=1) | +:vrecps.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd { Dd = VectorReciprocalStep(Dn,Dm); @@ -4898,37 +4909,37 @@ define pcodeop VectorReciprocalStep; define pcodeop vrev; -:vrev16.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=1 & c0404=0) | +:vrev16.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3 { Qd = vrev(Qm,esize1819x3); } -:vrev32.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=1 & c0404=0) | +:vrev32.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & 
thv_c0911=0 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3 { Qd = vrev(Qm,esize1819x3); } -:vrev64.^esize1819x3 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=1 & c0404=0) | +:vrev64.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3 { Qd = vrev(Qm,esize1819x3); } -:vrev16.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=0 & c0404=0) | +:vrev16.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3 { Dd = vrev(Dm,esize1819x3); } -:vrev32.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=0 & c0404=0) | +:vrev32.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3 { Dd = vrev(Dm,esize1819x3); } -:vrev64.^esize1819x3 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=0 & c0404=0) | +:vrev64.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3 { Dd = vrev(Dm,esize1819x3); @@ -4977,41 +4988,41 @@ ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2021=1 & thv_L7=0 & thv_c1621 [ sh ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2121=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 32; ] { export *[const]:8 shift_amt; } ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ shift_amt = thv_c1621 - 0; ] { export *[const]:8 shift_amt; } -:vqrshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=1) | +:vqrshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm { Qd = VectorRoundShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); } -:vqrshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=1) | +:vqrshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm { Dd = VectorRoundShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); } -:vqshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x24 & c0404=1) | +:vqshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x24 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x24 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd 
& Qm { Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); } -:vqshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2327=7 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x20 & c0404=1) | +:vqshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x20 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x20 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm { Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); } -:vqrshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x25 & c0404=1) | +:vqrshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x25 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x25 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm { Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); } -:vqrshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x21 & c0404=1) | +:vqrshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x21 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x21 & thv_c0404=1) ) & udt & esize2021 & ShiftImmRLI & Dd & Qm { Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); @@ -5019,64 +5030,64 @@ ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ sh } -:vqshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=1 & c0404=1) | +:vqshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm { Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); } -:vqshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=0 & c0404=1) | +:vqshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm { Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); } -:vqshlu.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=1 & c0404=1) | +:vqshlu.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 & thv_c0811=6 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm { Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); } -:vqshlu.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=0 & c0404=1) | +:vqshlu.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 
& thv_c0811=6 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm { Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); } -:vqshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=1) | +:vqshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & udt & esize2021 & Qd & Qm & Qn { Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); } -:vqshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=1) | +:vqshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & udt & esize2021 & Dd & Dm & Dn { Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); } -:vshl.I^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=5 & c0606=1 & c0404=1) | +:vshl.I^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm { Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,0:1); } -:vshl.I^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=5 & c0606=0 & c0404=1) | +:vshl.I^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm { Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,0:1); } -:vshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=0) | +:vshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn { Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); } -:vshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=0) | +:vshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn { Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); @@ -5084,104 +5095,104 @@ ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ sh define pcodeop VectorShiftLongLeft; -:vshll.^udt^ShiftSize Qd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=10 & c0607=0 & c0404=1) | +:vshll.^udt^ShiftSize Qd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=10 & c0607=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=10 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Dm { Qd = VectorShiftLongLeft(Dm,ShiftImmLLI); } -:vshll.^udt^esize1819 Qd, Dm, "#"^esize1819x3 is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=3 & c0607=0 & c0404=0) | +:vshll.^udt^esize1819 Qd, Dm, "#"^esize1819x3 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=3 & c0607=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=3 & thv_c0607=0 & thv_c0404=0) ) & udt & esize1819 & esize1819x3 & Qd & Dm { Qd = VectorShiftLongLeft(Dm,esize1819x3); } -:vrshl.^udt^esize2021 Qd, Qm, Qn is ( 
($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=0) | +:vrshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn { Qd = VectorRoundShiftLeft(Qm,esize2021,Qn); } -:vrshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=0) | +:vrshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn { Dd = VectorRoundShiftLeft(Dm,esize2021,Dn); } -:vrshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=1 & c0404=1) | +:vrshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm { Qd = VectorRoundShiftRight(Qm,ShiftImmRLI); } -:vrshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=0 & c0404=1) | +:vrshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm { Dd = VectorRoundShiftRight(Dm,ShiftImmRLI); } -:vrshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=8 & c0707=0 & c0606=1 & c0404=1) | +:vrshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=8 & c0707=0 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Qm { Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI); } -:vrsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=1 & c0404=1) | +:vrsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm { Qd = VectorRoundShiftRightAccumulate(Qd, Qm,ShiftImmRLI); } -:vrsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=0 & c0404=1) | +:vrsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm { Dd = VectorRoundShiftRightAccumulate(Dd, Dm,ShiftImmRLI); } -:vsli.^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=7 & c0811=5 & c0606=0 & c0404=1) | +:vsli.^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm { Dd = VectorShiftLeftInsert(Dd, Dm,ShiftImmLLI); } -:vsli.^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & cond=15 & c2327=7 & c0811=5 & c0606=1 & c0404=1) | +:vsli.^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 
& thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm { Qd = VectorShiftLeftInsert(Qd, Qm,ShiftImmLLI); } define pcodeop VectorWidenMultipyAccumulate; -:vsmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | +:vsmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & Dn { Dd = VectorWidenMultipyAccumulate(Dm,Dn,0:1); } -:vsmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | +:vsmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & Qn { Qd = VectorWidenMultipyAccumulate(Qm,Qn,0:1); } -:vummla.u8 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=1) | +:vummla.u8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=1) ) & Dd & Dm & Dn { Dd = VectorWidenMultipyAccumulate(Dm,Dn,1:1); } -:vummla.u8 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=1) | +:vummla.u8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=1) ) & Qd & Qm & Qn { Qd = VectorWidenMultipyAccumulate(Qm,Qn,1:1); } -:vusmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | +:vusmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & Dn { Dd = VectorWidenMultipyAccumulate(Dm,Dn,2:1); } -:vusmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | +:vusmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & Qn { Qd = VectorWidenMultipyAccumulate(Qm,Qn,2:1); @@ -5205,25 +5216,25 @@ define pcodeop VectorWidenMultipyAccumulate; Dd = sqrt(Dm); } -:vsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=1 & c0404=1) | +:vsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm { Qd = VectorShiftRightAccumulate(Qd, Qm,ShiftImmRLI); } -:vsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=0 & c0404=1) | +:vsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm { Dd = VectorShiftRightAccumulate(Dd, Dm,ShiftImmRLI); } -:vsri.^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=1 & c0404=1) | +:vsri.^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & 
ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Qd & Qm { Qd = VectorShiftRightInsert(Qd, Qm,ShiftImmRLI); } -:vsri.^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=0 & c0404=1) | +:vsri.^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Dm { Dd = VectorShiftRightInsert(Dd, Dm,ShiftImmRLI); @@ -5233,13 +5244,13 @@ define pcodeop VectorWidenMultipyAccumulate; # VSHR # -:vshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=1 & c0404=1) | +:vshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm { Qd = VectorShiftRight(Qm,ShiftImmRLI); } -:vshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=0 & c0404=1) | +:vshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm { Dd = VectorShiftRight(Dm,ShiftImmRLI); @@ -5247,7 +5258,7 @@ define pcodeop VectorWidenMultipyAccumulate; define pcodeop VectorShiftNarrowRight; -:vshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & cond=15 & c2327=5 & c0811=8 & c0607=0 & c0404=1) | +:vshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=8 & c0607=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Qm { Dd = VectorShiftNarrowRight(Qm,ShiftImmRLI); @@ -5257,13 +5268,13 @@ define pcodeop VectorShiftNarrowRight; # VRSQRTE define pcodeop VectorReciprocalSquareRootEstimate; -:vrsqrte.^fdt^32 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=1 & c0404=0) | +:vrsqrte.^fdt^32 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd { Qd = VectorReciprocalSquareRootEstimate(Qm,fdt); } -:vrsqrte.^fdt^32 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=0 & c0404=0) | +:vrsqrte.^fdt^32 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd { Dd = VectorReciprocalSquareRootEstimate(Dm,fdt); @@ -5273,13 +5284,13 @@ define pcodeop VectorReciprocalSquareRootEstimate; # VRSQRTS define pcodeop VectorReciprocalSquareRootStep; -:vrsqrts.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=1 & c0404=1) | +:vrsqrts.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd { Qd = 
VectorReciprocalSquareRootStep(Qn,Qm); } -:vrsqrts.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=0 & c0404=1) | +:vrsqrts.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd { Dd = VectorReciprocalSquareRootStep(Dn,Dm); @@ -5313,14 +5324,14 @@ vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=2 & thv_D22 & thv_c @define Vst1DdList "(c0811=2 | c0811=6 | c0811=7 | c0811=10)" @define T_Vst1DdList "(thv_c0811=2 | thv_c0811=6 | thv_c0811=7 | thv_c0811=10)" -:vst1.^esize0607 vst1DdList,RnAligned45 is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=15 & $(Vst1DdList)) | +:vst1.^esize0607 vst1DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=15 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=15 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList { mult_addr = RnAligned45; build vst1DdList; } -:vst1.^esize0607 vst1DdList,RnAligned45^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=13 & $(Vst1DdList)) | +:vst1.^esize0607 vst1DdList,RnAligned45^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=13 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=13 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList { mult_addr = RnAligned45; @@ -5328,7 +5339,7 @@ vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=2 & thv_D22 & thv_c RnAligned45 = RnAligned45 + (8 * vst1DdList); } -:vst1.^esize0607 vst1DdList,RnAligned45,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & $(Vst1DdList)) | +:vst1.^esize0607 vst1DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & VRm & vst1DdList { mult_addr = RnAligned45; @@ -5360,20 +5371,20 @@ vst1DdElement2: Dd^"["^vst1Index^"]" is Dd & vst1Index & c1011=2 @define Vst1DdElement2 "((c1011=0 & c0404=0) | (c1011=1 & c0505=0) | (c1011=2 & (c0406=0 | c0406=3))) & vst1DdElement2" -:vst1.^esize1011 vst1DdElement2,RnAligned2 is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=15 & $(Vst1DdElement2) +:vst1.^esize1011 vst1DdElement2,RnAligned2 is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=15 & $(Vst1DdElement2) { mult_addr = RnAligned2; build vst1DdElement2; } -:vst1.^esize1011 vst1DdElement2,RnAligned2^"!" is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=13 & $(Vst1DdElement2) +:vst1.^esize1011 vst1DdElement2,RnAligned2^"!" 
is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & c0003=13 & $(Vst1DdElement2) { mult_addr = RnAligned2; build vst1DdElement2; RnAligned2 = RnAligned2 + esize1011; } -:vst1.^esize1011 vst1DdElement2,RnAligned2,VRm is $(AMODE) & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & VRm & $(Vst1DdElement2) +:vst1.^esize1011 vst1DdElement2,RnAligned2,VRm is $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & RnAligned2 & esize1011 & c0809=0 & VRm & $(Vst1DdElement2) { mult_addr = RnAligned2; build vst1DdElement2; @@ -5515,14 +5526,14 @@ vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=3 & @define Vst2DdList "(c0811=3 | c0811=8 | c0811=9)" @define T_Vst2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)" -:vst2.^esize0607 vst2DdList,RnAligned45 is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=15 & $(Vst2DdList) ) | +:vst2.^esize0607 vst2DdList,RnAligned45 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=15 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=15 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList { mult_addr = RnAligned45; build vst2DdList; } -:vst2.^esize0607 vst2DdList,RnAligned45^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=13 & $(Vst2DdList) ) | +:vst2.^esize0607 vst2DdList,RnAligned45^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=13 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=13 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList { mult_addr = RnAligned45; @@ -5530,7 +5541,7 @@ vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=3 & RnAligned45 = RnAligned45 + (8 * vst2DdList); } -:vst2.^esize0607 vst2DdList,RnAligned45,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0607<3 & $(Vst2DdList) ) | +:vst2.^esize0607 vst2DdList,RnAligned45,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & $(T_Vst2DdList) ) ) & RnAligned45 & VRm & esize0607 & vst2DdList { mult_addr = RnAligned45; @@ -5566,15 +5577,15 @@ vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011 vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double -:vst2.^esize1011 vst2DdList2,vst2RnAligned2 is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=15 ) | +:vst2.^esize1011 vst2DdList2,vst2RnAligned2 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 unimpl -:vst2.^esize1011 vst2DdList2,vst2RnAligned2^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=13 ) | +:vst2.^esize1011 vst2DdList2,vst2RnAligned2^"!" 
is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 unimpl -:vst2.^esize1011 vst2DdList2,vst2RnAligned2,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 ) | +:vst2.^esize1011 vst2DdList2,vst2RnAligned2,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 & VRm unimpl @@ -5606,15 +5617,15 @@ vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c12 vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double -:vst3.^esize0607 vst3DdList,vst3RnAligned is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=15 ) | +:vst3.^esize0607 vst3DdList,vst3RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=15 ) ) & vst3RnAligned & esize0607 & vst3DdList unimpl -:vst3.^esize0607 vst3DdList,vst3RnAligned^"!" is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0 & c0003=13 ) | +:vst3.^esize0607 vst3DdList,vst3RnAligned^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=13 ) ) & vst3RnAligned & esize0607 & vst3DdList unimpl -:vst3.^esize0607 vst3DdList,vst3RnAligned,VRm is ( ( $(AMODE) & cond=15 & c2327=8 & c2021=0) | +:vst3.^esize0607 vst3DdList,vst3RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 ) ) & vst3RnAligned & esize0607 & vst3DdList & VRm unimpl @@ -5630,15 +5641,15 @@ vst3DdList2: "{"^buildvst3DdList^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011= vst3DdList2: "{"^buildvst3DdList^"}" is TMode=1 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single vst3DdList2: "{"^buildvst3DdList^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double -:vst3.^esize1011 vst3DdList2,vst3Rn is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=15 ) | +:vst3.^esize1011 vst3DdList2,vst3Rn is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15 ) ) & vst3Rn & esize1011 & vst3DdList2 unimpl -:vst3.^esize1011 vst3DdList2,vst3Rn^"!" is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=13 ) | +:vst3.^esize1011 vst3DdList2,vst3Rn^"!" 
is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13 ) ) & vst3Rn & esize1011 & vst3DdList2 unimpl -:vst3.^esize1011 vst3DdList2,vst3Rn,VRm is ( ( $(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 ) | +:vst3.^esize1011 vst3DdList2,vst3Rn,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 ) ) & vst3Rn & esize1011 & vst3DdList2 & VRm unimpl @@ -5666,13 +5677,13 @@ vst4DdList: "{"^buildVst4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVs vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double -:vst4.^esize0607 vst4DdList,vst4RnAligned is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=15) | +:vst4.^esize0607 vst4DdList,vst4RnAligned is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=15) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15) ) & vst4RnAligned & esize0607 & vst4DdList unimpl -:vst4.^esize0607 vst4DdList,vst4RnAligned^"!" is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=13) | +:vst4.^esize0607 vst4DdList,vst4RnAligned^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=13) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13) ) & vst4RnAligned & esize0607 & vst4DdList unimpl -:vst4.^esize0607 vst4DdList,vst4RnAligned,VRm is ( ($(AMODE) & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3) | +:vst4.^esize0607 vst4DdList,vst4RnAligned,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3) ) & VRm & vst4RnAligned & esize0607 & vst4DdList unimpl ####### @@ -5707,13 +5718,13 @@ vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011 vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double -:vst4.^esize1011 vst4DdList2,vst4RnAligned2 is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=15) | +:vst4.^esize1011 vst4DdList2,vst4RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=15) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15) ) & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl -:vst4.^esize1011 vst4DdList2,vst4RnAligned2^"!" is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=13) | +:vst4.^esize1011 vst4DdList2,vst4RnAligned2^"!" 
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=13) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13) ) & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl -:vst4.^esize1011 vst4DdList2,vst4RnAligned2,VRm is ( ($(AMODE) & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3) | +:vst4.^esize1011 vst4DdList2,vst4RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3) ) & VRm & vst4RnAligned2 & esize1011 & vst4DdList2 unimpl @endif # SIMD @@ -5825,43 +5836,43 @@ vstmSdList: "{"^buildVstmSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 define pcodeop FloatVectorSub; define pcodeop VectorSubAndNarrow; -:vsub.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=6 & c0811=8 & Q6=0 & c0404=0) | +:vsub.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm { Dd = VectorSub(Dn,Dm,esize2021); } -:vsub.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=6 & c0811=8 & Q6=1 & c0404=0) | +:vsub.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=8 & Q6=1 & c0404=0) | ($(TMODE_F) &thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0) ) & esize2021 & Qm & Qn & Qd { Qd = VectorSub(Qn,Qm,esize2021); } -:vsub.f32 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=0) | +:vsub.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = FloatVectorSub(Dn,Dm,2:1,32:1); } -:vsub.f32 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=0) | +:vsub.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & Qn & Qd & Qm { Qd = FloatVectorSub(Qn,Qm,2:1,32:1); } -:vsubhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=5 & c0811=6 & Q6=0 & c0404=0) | +:vsubhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=6 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & esize2021x2 & Dd & Qn & Qm { Dd = VectorSubAndNarrow(Qn,Qm,esize2021x2); } -:vsubl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=2 & c0606=0 & c0404=0) | +:vsubl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=2 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=2 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm { Qd = VectorSub(Dn,Dm,esize2021,udt); } -:vsubw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=3 & c0606=0 & c0404=0) | +:vsubw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=3 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=3 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm { Qd = VectorSub(Qn,Dm,esize2021,udt); @@ -5900,7 +5911,7 @@ define pcodeop VectorSubAndNarrow; # VSWP # -:vswp Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=0 & c0404=0 ) | +:vswp Dd,Dm is ( ( $(AMODE) & ARMcond=0 & 
cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { tmp:8 = Dm; @@ -5908,7 +5919,7 @@ define pcodeop VectorSubAndNarrow; Dd = tmp; } -:vswp Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=1 & c0404=0 ) | +:vswp Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { tmp:16 = Qm; @@ -5940,13 +5951,13 @@ vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=2 & thv_N7 & thv_c161 vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=3 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=4; ] { export 4:4; } -:vtbl.8 VRd,vtblDdList,VRm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=0 & c0404=0) | +:vtbl.8 VRd,vtblDdList,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=0 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList { VRd = VectorTableLookup(VRm,VRn,vtblDdList); } -:vtbx.8 VRd,vtblDdList,VRm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=1 & c0404=0) | +:vtbx.8 VRd,vtblDdList,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=1 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList { VRd = VectorTableLookup(VRm,VRn,vtblDdList); @@ -5959,13 +5970,13 @@ vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=3 & thv_N7 & thv_c161 define pcodeop VectorTest; -:vtst.^esize2021 Qd, Qn, Qm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & c0606=1 & c0404=1) | +:vtst.^esize2021 Qd, Qn, Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd { Qd = VectorTest(Qn, Qm); } -:vtst.^esize2021 Dd, Dn, Dm is ( ($(AMODE) & cond=15 & c2327=4 & c0811=8 & c0606=0 & c0404=1) | +:vtst.^esize2021 Dd, Dn, Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & c0606=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd { Dd = VectorTest(Dn, Dm); @@ -5973,13 +5984,13 @@ define pcodeop VectorTest; define pcodeop VectorTranspose; -:vtrn^"."^esize1819 Dd,Dm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=0 & c0404=0) | +:vtrn^"."^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0)) & esize1819 & Dd & Dm { Dd = VectorTranspose(Dm,esize1819); } -:vtrn^"."^esize1819 Qd,Qm is ( ($(AMODE) & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=1 & c0404=0) | +:vtrn^"."^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd { Qd = VectorTranspose(Qm,esize1819); @@ -5992,86 +6003,86 @@ define pcodeop VectorUnsignedDotProduct; define pcodeop 
VectorSignedUnsignedDotProduct; define pcodeop VectorUnsignedSignedDotProduct; -:vsdot.s8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd +:vsdot.s8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorSignedDotProduct(Dn,Dm0,Mindex); } -:vsdot.s8 Qd,Qn,Qm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm0 & Mindex & Qn & Qd +:vsdot.s8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Dm0 & Mindex & Qn & Qd { - Qd = VectorSignedDotProduct(Qn,Qm0,Mindex); + Qd = VectorSignedDotProduct(Qn,Dm0,Mindex); } -:vsdot.s8 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd +:vsdot.s8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = VectorSignedDotProduct(Dn,Dm); } -:vsdot.s8 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd +:vsdot.s8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd { Qd = VectorSignedDotProduct(Qn,Qm); } -:vudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd +:vudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorUnsignedDotProduct(Dn,Dm0,Mindex); } -:vudot.u8 Qd,Qn,Qm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & c0606=1 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Qm0 & Mindex & Qn & Qd +:vudot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Dm0 & Mindex & Qn & Qd { - Qd = VectorUnsignedDotProduct(Qn,Qm0,Mindex); + Qd = VectorUnsignedDotProduct(Qn,Dm0,Mindex); } -:vudot.u8 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm & Dn & Dd +:vudot.u8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x18 & 
thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = VectorUnsignedDotProduct(Dn,Dm); } -:vudot.u8 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & c0606=1 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x18 & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Qm & Qn & Qd +:vudot.u8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = VectorUnsignedDotProduct(Qn,Qm); } -:vsudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd +:vsudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorSignedUnsignedDotProduct(Dn,Dm0,Mindex); } -:vsudot.u8 Qd,Qn,Qm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=1) | - ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Qm0 & Mindex & Qn & Qd +:vsudot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=1) | + ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Dm0 & Mindex & Qn & Qd { - Qd = VectorSignedUnsignedDotProduct(Qn,Qm0,Mindex); + Qd = VectorSignedUnsignedDotProduct(Qn,Dm0,Mindex); } -:vusdot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd +:vusdot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorUnsignedSignedDotProduct(Dn,Dm0,Mindex); } -:vusdot.u8 Qd,Qn,Qm0^Mindex is ( ($(AMODE) & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm0 & Mindex & Qn & Qd +:vusdot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Dm0 & Mindex & Qn & Qd { - Qd = VectorUnsignedSignedDotProduct(Qn,Qm0,Mindex); + Qd = VectorUnsignedSignedDotProduct(Qn,Dm0,Mindex); } -:vusdot.u8 Dd,Dn,Dm is ( ($(AMODE) & cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd +:vusdot.u8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = VectorUnsignedSignedDotProduct(Dn,Dm); } -:vusdot.u8 Qd,Qn,Qm is ( ($(AMODE) & cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | - ($(TMODE_E) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd +:vusdot.u8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & 
cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | + ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd { Qd = VectorUnsignedSignedDotProduct(Qn,Qm); } @@ -6082,13 +6093,13 @@ define pcodeop VectorUnsignedSignedDotProduct; define pcodeop VectorUnzip; -:vuzp^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=0 & c0404=0 ) | +:vuzp^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorUnzip(Dm,esize1819); } -:vuzp^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=1 & c0404=0 ) | +:vuzp^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorUnzip(Qm,esize1819); @@ -6101,13 +6112,13 @@ define pcodeop VectorUnzip; define pcodeop VectorZip; -:vzip^esize1819 Dd,Dm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=0 & c0404=0 ) | +:vzip^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorZip(Dm,esize1819); } -:vzip^esize1819 Qd,Qm is ( ( $(AMODE) & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=1 & c0404=0 ) | +:vzip^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorZip(Qm,esize1819); diff --git a/Ghidra/Processors/ARM/data/languages/ARMv8.sinc b/Ghidra/Processors/ARM/data/languages/ARMv8.sinc index 9a4f32e37a..3520d6a303 100644 --- a/Ghidra/Processors/ARM/data/languages/ARMv8.sinc +++ b/Ghidra/Processors/ARM/data/languages/ARMv8.sinc @@ -434,7 +434,7 @@ define pcodeop PolynomialMult; # F6.1.59 p8000 A1 op == 1 (c0808) :vcvt.f32.f16 Qd,Dm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=1) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=1)) & Qd & Dm { @@ -443,7 +443,7 @@ define pcodeop PolynomialMult; # F6.1.59 p8000 A1 op == 0 (c0808) :vcvt.f16.f32 Dd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=0)) & Dd & Qm { Dd = float2float(Qm); } @@ -492,14 +492,14 @@ vcvt_56_128_dt: ".u32.f32" # 
F6.1.60 p8002 A1 Q == 0 (c0606) :vcvt^vcvt_56_64_dt Dd,Dm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=0) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=0)) & vcvt_56_64_dt & Dd & Dm unimpl # F6.1.60 p8002 A1 Q == 1 (c0606) :vcvt^vcvt_56_128_dt Qd,Qm - is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=1) + is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=1)) & vcvt_56_128_dt & Qd & Qm unimpl @@ -576,44 +576,84 @@ vcvt_59_fbits: "#"^fbits is TMode=0 & c1621 [ fbits = 64 - c1621; ] { vcvt_59_fbits: "#"^fbits is TMode=1 & thv_c1621 [ fbits = 64 - thv_c1621; ] { } vcvt_59_32_dt: ".f32.s32" - is ((TMode=0 & c0808=0 & c2424=0) - | (TMode=1 & thv_c0808=0 & thv_c2828=0)) + is ((TMode=0 & c0809=2 & c2424=0) + | (TMode=1 & thv_c0809=2 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".f32.u32" - is ((TMode=0 & c0808=0 & c2424=1) - | (TMode=1 & thv_c0808=0 & thv_c2828=1)) + is ((TMode=0 & c0809=2 & c2424=1) + | (TMode=1 & thv_c0809=2 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".s32.f32" - is ((TMode=0 & c0808=1 & c2424=0) - | (TMode=1 & thv_c0808=1 & thv_c2828=0)) + is ((TMode=0 & c0809=3 & c2424=0) + | (TMode=1 & thv_c0809=3 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_59_32_dt: ".u32.f32" - is ((TMode=0 & c0808=1 & c2424=1) - | (TMode=1 & thv_c0808=1 & thv_c2828=1)) + is ((TMode=0 & c0809=3 & c2424=1) + | (TMode=1 & thv_c0809=3 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } - +vcvt_59_32_dt: ".f16.s16" + is ((TMode=0 & c0809=0 & c2424=0) + | (TMode=1 & thv_c0809=0 & thv_c2828=0)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } +vcvt_59_32_dt: ".f16.u16" + is ((TMode=0 & c0809=0 & c2424=1) + | (TMode=1 & thv_c0809=0 & thv_c2828=1)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } +vcvt_59_32_dt: ".s16.f16" + is ((TMode=0 & c0809=1 & c2424=0) + | (TMode=1 & thv_c0809=1 & thv_c2828=0)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } +vcvt_59_32_dt: ".u16.f16" + is ((TMode=0 & c0809=1 & c2424=1) + | (TMode=1 & thv_c0809=1 & thv_c2828=1)) + & Dd & Dm & vcvt_59_fbits_built + { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } + vcvt_59_64_dt: ".f32.s32" - is ((TMode=0 & c0808=0 & c2424=0) - | (TMode=1 & thv_c0808=0 & thv_c2828=0)) + is ((TMode=0 & c0809=2 & c2424=0) + | (TMode=1 & thv_c0809=2 & thv_c2828=0)) & Qd & Qm & vcvt_59_fbits_built { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, 
 vcvt_59_64_dt: ".f32.u32"
-	is ((TMode=0 & c0808=0 & c2424=1)
-	| (TMode=1 & thv_c0808=0 & thv_c2828=1))
+	is ((TMode=0 & c0809=2 & c2424=1)
+	| (TMode=1 & thv_c0809=2 & thv_c2828=1))
 	& Qd & Qm & vcvt_59_fbits_built
 	{ Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); }
 vcvt_59_64_dt: ".s32.f32"
-	is ((TMode=0 & c0808=1 & c2424=0)
-	| (TMode=1 & thv_c0808=1 & thv_c2828=0))
+	is ((TMode=0 & c0809=3 & c2424=0)
+	| (TMode=1 & thv_c0809=3 & thv_c2828=0))
 	& Qd & Qm & vcvt_59_fbits_built
 	{ Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); }
 vcvt_59_64_dt: ".u32.f32"
-	is ((TMode=0 & c0808=1 & c2424=1)
-	| (TMode=1 & thv_c0808=1 & thv_c2828=1))
+	is ((TMode=0 & c0809=3 & c2424=1)
+	| (TMode=1 & thv_c0809=3 & thv_c2828=1))
+	& Qd & Qm & vcvt_59_fbits_built
+	{ Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); }
+vcvt_59_64_dt: ".f16.s16"
+	is ((TMode=0 & c0809=0 & c2424=0)
+	| (TMode=1 & thv_c0809=0 & thv_c2828=0))
+	& Qd & Qm & vcvt_59_fbits_built
+	{ Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); }
+vcvt_59_64_dt: ".f16.u16"
+	is ((TMode=0 & c0809=0 & c2424=1)
+	| (TMode=1 & thv_c0809=0 & thv_c2828=1))
+	& Qd & Qm & vcvt_59_fbits_built
+	{ Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); }
+vcvt_59_64_dt: ".s16.f16"
+	is ((TMode=0 & c0809=1 & c2424=0)
+	| (TMode=1 & thv_c0809=1 & thv_c2828=0))
+	& Qd & Qm & vcvt_59_fbits_built
+	{ Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); }
+vcvt_59_64_dt: ".u16.f16"
+	is ((TMode=0 & c0809=1 & c2424=1)
+	| (TMode=1 & thv_c0809=1 & thv_c2828=1))
 	& Qd & Qm & vcvt_59_fbits_built
 	{ Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); }
 
@@ -622,15 +662,15 @@ vcvt_59_64_dt: ".u32.f32"
 
 # F6.1.63 p8012 A1 Q = 0 (c0606)
 :vcvt^vcvt_59_32_dt Dd,Dm,vcvt_59_fbits
-	is ((TMode=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c0911=0b111 & c0707=0 & c0404=1 & c0606=0)
-	| (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c0911=0b111 & thv_c0707=0 & thv_c0404=1 & thv_c0606=0))
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c1011=0b11 & c0707=0 & c0404=1 & c0606=0)
+	| (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c1011=0b11 & thv_c0707=0 & thv_c0404=1 & thv_c0606=0))
 	& vcvt_59_32_dt & vcvt_59_fbits & Dd & Dm
 	unimpl
 
 # F6.1.63 p8012 A1 Q = 1 (c0606)
 :vcvt^vcvt_59_64_dt Qd,Qm,vcvt_59_fbits
-	is ((TMode=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c0911=0b111 & c0707=0 & c0404=1 & c0606=1)
-	| (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c0911=0b111 & thv_c0707=0 & thv_c0404=1 & thv_c0606=1))
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c1011=0b11 & c0707=0 & c0404=1 & c0606=1)
+	| (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c1011=0b11 & thv_c0707=0 & thv_c0404=1 & thv_c0606=1))
 	& vcvt_59_64_dt & vcvt_59_fbits & Qd & Qm
 	unimpl
 
@@ -771,14 +811,14 @@ vcvt_amnp_simd_128_dt: ".u32" is TMode=1 & thv_c0707=1 & thv_c0809 & vcvt_amnp_s
 
 # F6.1.65,69,71,73 p8019,8028,8032,8036 A1 64-bit SIMD vector variant Q = 0 (c0606)
 :vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_64_dt^".f32" Dd,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=0)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=0)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=0))
 	& vcvt_amnp_simd_RM & vcvt_amnp_simd_64_dt & Dd & Dm
 	unimpl
 
 # F6.1.65,69,71,73 p8019,8028,8032,8036 A1 128-bit SIMD vector variant Q = 1 (c0606)
 :vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_128_dt^".f32" Qd,Qm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=1)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=1)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=1))
 	& vcvt_amnp_simd_RM & vcvt_amnp_simd_128_dt & Qd & Qm
 	unimpl
@@ -810,16 +850,23 @@ vcvt_amnp_fp_d_dt: ".u32" is TMode=1 & thv_c0707=0 & thv_c1617 & vcvt_amnp_fp_RM
 vcvt_amnp_fp_d_dt: ".s32" is TMode=0 & c0707=1 & c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); }
 vcvt_amnp_fp_d_dt: ".s32" is TMode=1 & thv_c0707=1 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); }
 
+# F6.1.66,70,72,74 p8021,8030,8034,8038 Single-precision scalar variant size = 01 (c0809)
+:vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_s_dt^".f16" Sd,Sm
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01)
+	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b01))
+	& vcvt_amnp_fp_RM & vcvt_amnp_fp_s_dt & Sd & Sm
+	unimpl
+
 # F6.1.66,70,72,74 p8021,8030,8034,8038 Single-precision scalar variant size = 10 (c0809)
 :vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_s_dt^".f32" Sd,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10))
 	& vcvt_amnp_fp_RM & vcvt_amnp_fp_s_dt & Sd & Sm
 	unimpl
 
-# F6.1.66,70,72,74 p8021,8030,8034,8038 Double-precision scalar variant size = 10 (c0809)
+# F6.1.66,70,72,74 p8021,8030,8034,8038 Double-precision scalar variant size = 11 (c0809)
 :vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_d_dt^".f64" Sd,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11))
 	& vcvt_amnp_fp_RM & vcvt_amnp_fp_d_dt & Sd & Dm
 	unimpl
@@ -950,98 +997,98 @@ define pcodeop FPMinNum;
 # F6.1.117 p8178 A1/T1 Q = 0 (c0606)
 :vmaxnm^".f32" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=0)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=0)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0))
 	& Dd & Dn & Dm
 { Dd = FPMaxNum(Dn, Dm); }
 
 # F6.1.117 p8178 A1/T1 Q = 1 (c0606)
 :vmaxnm^".f32" Qd,Qn,Qm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=1)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=1)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1))
 	& Qd & Qn & Qm
 { Qd = FPMaxNum(Qn, Qm); }
 
 # F6.1.117 p8178 A1/T1 Q = 0 (c0606)
 :vmaxnm^".f16" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=0)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=0)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0))
 	& Dd & Dn & Dm
 { Dd = FPMaxNum(Dn, Dm); }
 
 # F6.1.117 p8178 A1/T1 Q = 1 (c0606)
 :vmaxnm^".f16" Qd,Qn,Qm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=1)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=1)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1))
 	& Qd & Qn & Qm
 { Qd = FPMaxNum(Qn, Qm); }
 
 # F6.1.117 p8178 A2/T2 size = 01 (c0809)
 :vmaxnm^".f16" Sd,Sn,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b01)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b01)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b01))
 	& Sd & Sn & Sm
 { Sd = FPMaxNum(Sn, Sm); }
 
 # F6.1.117 p8178 A2/T2 size = 10 (c0809)
 :vmaxnm^".f32" Sd,Sn,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10))
 	& Sd & Sn & Sm
 { Sd = FPMaxNum(Sn, Sm); }
 
 # F6.1.117 p8178 A2/T2 size = 11 (c0809)
 :vmaxnm^".f64" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11))
 	& Dd & Dn & Dm
 { Dd = FPMaxNum(Dn, Dm); }
 
 # F6.1.120 p8178 A1/T1 Q = 0 (c0606)
 :vminnm^".f32" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=0)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=0)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0))
 	& Dd & Dn & Dm
 { Dd = FPMinNum(Dn, Dm); }
 
 # F6.1.120 p8178 A1/T1 Q = 1 (c0606)
 :vminnm^".f32" Qd,Qn,Qm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=1)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=1)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1))
 	& Qd & Qn & Qm
 { Qd = FPMinNum(Qn, Qm); }
 
 # F6.1.120 p8178 A1/T1 Q = 0 (c0606)
 :vminnm^".f16" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=0)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=0)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0))
 	& Dd & Dn & Dm
 { Dd = FPMinNum(Dn, Dm); }
 
 # F6.1.120 p8178 A1/T1 Q = 1 (c0606)
 :vminnm^".f16" Qd,Qn,Qm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=1)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=1)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1))
 	& Qd & Qn & Qm
 { Qd = FPMinNum(Qn, Qm); }
 
 # F6.1.120 p8178 A2/T2 size = 01 (c0809)
 :vminnm^".f16" Sd,Sn,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b01))
 	& Sd & Sn & Sm
 { Sd = FPMinNum(Sn, Sm); }
 
 # F6.1.120 p8178 A2/T2 size = 10 (c0809)
 :vminnm^".f32" Sd,Sn,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10))
 	& Sd & Sn & Sm
 { Sd = FPMinNum(Sn, Sm); }
 
 # F6.1.120 p8178 A2/T2 size = 11 (c0809)
 :vminnm^".f64" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11))
 	& Dd & Dn & Dm
 { Dd = FPMinNum(Dn, Dm); }
 
@@ -1091,15 +1138,15 @@ vmull_dt: ".p64"
 
 # F6.1.149 p8266 VMULL (-integer and +polynomial) op=1 (c0909) (with condition U!=1 and size!=0b11 and size!=01)
 :vmull^vmull_dt Qd,Dn,Dm
-	is ((TMode=0 & c2531=0b1111001 & c2424=0 & c2323=1 & ( c2121 & c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=1)
-	| (TMode=1 & thv_c2931=0b111 & thv_c2828=0 & thv_c2327=0b11111 & (thv_c2121 & thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=1))
+	is ((TMode=0 & ARMcond=0 & c2531=0b1111001 & c2424=0 & c2323=1 & ( c2121 & c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=1)
+	| (TMode=1 & thv_c2931=0b111 & thv_c2828=0 & thv_c2327=0b11111 & (thv_c2121 & thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=1))
 	& vmull_dt & Qd & Dn & Dm
 { Qd = PolynomialMult(Dn, Dm); }
 
 # F6.1.149 p8266 VMULL (+integer and -polynomial) op=0 (c0909) (with condition size!=0b11)
 :vmull^vmull_dt Qd,Dn,Dm
-	is ((TMode=0 & c2531=0b1111001 & c2424 & c2323=1 & ( c2121=0 | c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=0)
-	| (TMode=1 & thv_c2931=0b111 & thv_c2828 & thv_c2327=0b11111 & (thv_c2121=0 | thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=0))
+	is ((TMode=0 & ARMcond=0 & c2531=0b1111001 & c2323=1 & ( c2121=0 | c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=0)
+	| (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & (thv_c2121=0 | thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=0))
 	& vmull_dt & Qd & Dn & Dm
 { Qd = VectorMultiply(Dn, Dm); }
 
@@ -1201,14 +1248,14 @@ vrint_fp_RM: "p"
 
 # F6.1.200,202,204,206 p8398,8402,8406,8410 size = 10 (c0809)
 :vrint^vrint_fp_RM^".f32" Sd,Sm
-	is ((TMode=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b10)
+	is ((TMode=0 & ARMcond=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b10)
 	| (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b10))
 	& vrint_fp_RM & Sd & Sm
 { Sd = FPRoundInt(Sm, 32:1, vrint_fp_RM, 0:1); }
 
 # F6.1.200,202,204,206 p8398,8402,8406,8410 size = 11 (c0809)
 :vrint^vrint_fp_RM^".f64" Dd,Dm
-	is ((TMode=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b11)
+	is ((TMode=0 & ARMcond=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b11)
 	| (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b11))
 	& vrint_fp_RM & Dd & Dm
 { Dd = FPRoundInt(Dm, 32:1, vrint_fp_RM, 0:1); }
@@ -1286,14 +1333,14 @@ vselcond: "vs"
 
 # F6.1.223 p8447 A1/T1 size = 11 doubleprec (c0809)
 :vsel^vselcond^".f64" Dd,Dn,Dm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11))
 	& vselcond & Dn & Dd & Dm
 { Dd = zext(vselcond != 0) * Dn + zext(vselcond == 0) * Dm; }
 
 # F6.1.223 p8447 A1/T1 size = 10 singleprec (c0809)
 :vsel^vselcond^".f32" Sd,Sn,Sm
-	is ((TMode=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10)
+	is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10)
 	| (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10))
 	& vselcond & Sn & Sd & Sm
 { Sd = zext(vselcond != 0) * Sn + zext(vselcond == 0) * Sm; }