This page lists common pseudocode functions shared by many pages.
// AArch32.AT()
// ============
// Perform address translation as per AT instructions.

AArch32.AT(bits(32) vaddress, TranslationStage stage_in, bits(2) el, ATAccess ataccess)
    TranslationStage stage = stage_in;
    SecurityState ss;
    Regime regime;
    boolean eae;

    // ATS1Hx instructions
    if el == EL2 then
        regime = Regime_EL2;
        eae = TRUE;
        ss = SS_NonSecure;
    // ATS1Cxx instructions
    elsif stage == TranslationStage_1 || (stage == TranslationStage_12 && !HaveEL(EL2)) then
        stage = TranslationStage_1;
        ss = SecurityStateAtEL(PSTATE.EL);
        regime = if ss == SS_Secure && ELUsingAArch32(EL3) then Regime_EL30 else Regime_EL10;
        eae = TTBCR.EAE == '1';
    // ATS12NSOxx instructions
    else
        regime = Regime_EL10;
        eae = if HaveAArch32EL(EL3) then TTBCR_NS.EAE == '1' else TTBCR.EAE == '1';
        ss = SS_NonSecure;

    AddressDescriptor addrdesc;
    SDFType sdftype;
    boolean aligned = TRUE;
    bit supersection = '0';
    accdesc = CreateAccDescAT(ss, el, ataccess);

    // Prepare fault fields in case a fault is detected
    fault = NoFault(accdesc);

    if eae then
        (fault, addrdesc) = AArch32.S1TranslateLD(fault, regime, vaddress, aligned, accdesc);
    else
        (fault, addrdesc, sdftype) = AArch32.S1TranslateSD(fault, regime, vaddress, aligned,
                                                           accdesc);
        supersection = if sdftype == SDFType_Supersection then '1' else '0';

    // ATS12NSOxx instructions
    if stage == TranslationStage_12 && fault.statuscode == Fault_None then
        (fault, addrdesc) = AArch32.S2Translate(fault, addrdesc, aligned, accdesc);

    if fault.statuscode != Fault_None then
        // Take exception on External abort or when a fault occurs on translation table walk
        if IsExternalAbort(fault) || (PSTATE.EL == EL1 && EL2Enabled() && fault.s2fs1walk) then
            PAR = bits(64) UNKNOWN;
            AArch32.Abort(vaddress, fault);
        addrdesc.fault = fault;

    if (eae || (stage == TranslationStage_12 && (HCR.VM == '1' || HCR.DC == '1')) ||
            (stage == TranslationStage_1 && el != EL2 && PSTATE.EL == EL2)) then
        AArch32.EncodePARLD(addrdesc, ss);
    else
        AArch32.EncodePARSD(addrdesc, supersection, ss);
    return;

// AArch32.EncodePARLD()
// =====================
// Returns 64-bit format PAR on address translation instruction.

AArch32.EncodePARLD(AddressDescriptor addrdesc, SecurityState ss)
    if !IsFault(addrdesc) then
        bit ns;
        if ss == SS_NonSecure then
            ns = bit UNKNOWN;
        elsif addrdesc.paddress.paspace == PAS_Secure then
            ns = '0';
        else
            ns = '1';

        PAR.F = '0';
        PAR.SH = ReportedPARShareability(PAREncodeShareability(addrdesc.memattrs));
        PAR.NS = ns;
        PAR<10> = bit IMPLEMENTATION_DEFINED "Non-Faulting PAR"; // IMPDEF
        PAR.LPAE = '1';
        PAR.PA = addrdesc.paddress.address<39:12>;
        PAR.ATTR = ReportedPARAttrs(EncodePARAttrs(addrdesc.memattrs));
    else
        PAR.F = '1';
        PAR.FST = AArch32.PARFaultStatusLD(addrdesc.fault);
        PAR.S2WLK = if addrdesc.fault.s2fs1walk then '1' else '0';
        PAR.FSTAGE = if addrdesc.fault.secondstage then '1' else '0';
        PAR.LPAE = '1';
        PAR<63:48> = bits(16) IMPLEMENTATION_DEFINED "Faulting PAR"; // IMPDEF
    return;

// AArch32.EncodePARSD()
// =====================
// Returns 32-bit format PAR on address translation instruction.

AArch32.EncodePARSD(AddressDescriptor addrdesc_in, bit supersection, SecurityState ss)
    AddressDescriptor addrdesc = addrdesc_in;

    if !IsFault(addrdesc) then
        if (addrdesc.memattrs.memtype == MemType_Device ||
              (addrdesc.memattrs.inner.attrs == MemAttr_NC &&
               addrdesc.memattrs.outer.attrs == MemAttr_NC)) then
            addrdesc.memattrs.shareability = Shareability_OSH;

        bit ns;
        if ss == SS_NonSecure then
            ns = bit UNKNOWN;
        elsif addrdesc.paddress.paspace == PAS_Secure then
            ns = '0';
        else
            ns = '1';

        bits(2) sh = if addrdesc.memattrs.shareability != Shareability_NSH then '01' else '00';

        PAR.F = '0';
        PAR.SS = supersection;
        PAR.Outer = AArch32.ReportedOuterAttrs(AArch32.PAROuterAttrs(addrdesc.memattrs));
        PAR.Inner = AArch32.ReportedInnerAttrs(AArch32.PARInnerAttrs(addrdesc.memattrs));
        PAR.SH = ReportedPARShareability(sh);
        PAR<8> = bit IMPLEMENTATION_DEFINED "Non-Faulting PAR"; // IMPDEF
        PAR.NS = ns;
        PAR.NOS = if addrdesc.memattrs.shareability == Shareability_OSH then '0' else '1';
        PAR.LPAE = '0';
        PAR.PA = addrdesc.paddress.address<39:12>;
    else
        PAR.F = '1';
        PAR.FST = AArch32.PARFaultStatusSD(addrdesc.fault);
        PAR.LPAE = '0';
        PAR<31:16> = bits(16) IMPLEMENTATION_DEFINED "Faulting PAR"; // IMPDEF
    return;

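The non-faulting 32-bit PAR assembled above packs into fixed bit positions. The following C
sketch is a hypothetical helper for illustration only, assuming the short-descriptor PAR
layout F[0], SS[1], Outer[3:2], Inner[6:4], SH[7], NS[9], NOS[10], LPAE[11], PA[31:12]; the
IMPLEMENTATION DEFINED bit 8 is left zero.

#include <stdint.h>

/* Hypothetical helper mirroring the non-faulting path of AArch32.EncodePARSD.
 * Field positions are an assumption documented above; 'sh' is the single SH
 * bit derived from the two-bit value computed in the pseudocode. */
static uint32_t pack_par_sd(uint32_t pa, uint32_t supersection, uint32_t outer,
                            uint32_t inner, uint32_t sh, uint32_t ns, uint32_t nos)
{
    return (pa & 0xFFFFF000u) |          /* PA<31:12> */
           ((nos & 1u) << 10) |          /* NOS */
           ((ns  & 1u) << 9)  |          /* NS */
           ((sh  & 1u) << 7)  |          /* SH */
           ((inner & 7u) << 4) |         /* Inner */
           ((outer & 3u) << 2) |         /* Outer */
           ((supersection & 1u) << 1);   /* SS; F (bit 0) == 0 and LPAE == 0 */
}
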
// AArch32.PARFaultStatusLD()
// ==========================
// Fault status field decoding of 64-bit PAR.

bits(6) AArch32.PARFaultStatusLD(FaultRecord fault)
    bits(6) syndrome;
    if fault.statuscode == Fault_Domain then
        // Report Domain fault
        assert fault.level IN {1,2};
        syndrome<1:0> = if fault.level == 1 then '01' else '10';
        syndrome<5:2> = '1111';
    else
        syndrome = EncodeLDFSC(fault.statuscode, fault.level);
    return syndrome;

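For the Domain-fault special case, the two encodings that can result are '111101' (0x3D) for a
level 1 fault and '111110' (0x3E) for a level 2 fault. A minimal C sketch of just that case,
using a hypothetical helper name:

/* Hypothetical helper mirroring the Domain-fault case of
 * AArch32.PARFaultStatusLD: syndrome<5:2> = '1111', syndrome<1:0> = level. */
static unsigned par_fst_domain(int level)
{
    /* the pseudocode asserts level IN {1,2} */
    return 0x3Cu | (level == 1 ? 0x1u : 0x2u); /* 0x3D or 0x3E */
}
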
// AArch32.PARFaultStatusSD()
// ==========================
// Fault status field decoding of 32-bit PAR.

bits(6) AArch32.PARFaultStatusSD(FaultRecord fault)
    bits(6) syndrome;
    syndrome<5> = if IsExternalAbort(fault) then fault.extflag else '0';
    syndrome<4:0> = EncodeSDFSC(fault.statuscode, fault.level);
    return syndrome;

// AArch32.PARInnerAttrs()
// =======================
// Convert orthogonal attributes and hints to 32-bit PAR Inner field.

bits(3) AArch32.PARInnerAttrs(MemoryAttributes memattrs)
    bits(3) result;
    if memattrs.memtype == MemType_Device then
        if memattrs.device == DeviceType_nGnRnE then
            result = '001'; // Non-cacheable
        elsif memattrs.device == DeviceType_nGnRE then
            result = '011'; // Non-cacheable
    else
        MemAttrHints inner = memattrs.inner;
        if inner.attrs == MemAttr_NC then
            result = '000'; // Non-cacheable
        elsif inner.attrs == MemAttr_WB && inner.hints<0> == '1' then
            result = '101'; // Write-Back, Write-Allocate
        elsif inner.attrs == MemAttr_WT then
            result = '110'; // Write-Through
        elsif inner.attrs == MemAttr_WB && inner.hints<0> == '0' then
            result = '111'; // Write-Back, no Write-Allocate
    return result;

// AArch32.PAROuterAttrs()
// =======================
// Convert orthogonal attributes and hints to 32-bit PAR Outer field.

bits(2) AArch32.PAROuterAttrs(MemoryAttributes memattrs)
    bits(2) result;
    if memattrs.memtype == MemType_Device then
        result = bits(2) UNKNOWN;
    else
        MemAttrHints outer = memattrs.outer;
        if outer.attrs == MemAttr_NC then
            result = '00'; // Non-cacheable
        elsif outer.attrs == MemAttr_WB && outer.hints<0> == '1' then
            result = '01'; // Write-Back, Write-Allocate
        elsif outer.attrs == MemAttr_WT && outer.hints<0> == '0' then
            result = '10'; // Write-Through, no Write-Allocate
        elsif outer.attrs == MemAttr_WB && outer.hints<0> == '0' then
            result = '11'; // Write-Back, no Write-Allocate
    return result;

// AArch32.ReportedInnerAttrs()
// ============================
// The value returned in this field can be the resulting attribute, as determined by any permitted
// implementation choices and any applicable configuration bits, instead of the value that appears
// in the translation table descriptor.

bits(3) AArch32.ReportedInnerAttrs(bits(3) attrs);

// AArch32.ReportedOuterAttrs()
// ============================
// The value returned in this field can be the resulting attribute, as determined by any permitted
// implementation choices and any applicable configuration bits, instead of the value that appears
// in the translation table descriptor.

bits(2) AArch32.ReportedOuterAttrs(bits(2) attrs);

// AArch32.DC()
// ============
// Perform Data Cache Operation.

AArch32.DC(bits(32) regval, CacheOp cacheop, CacheOpScope opscope)
    CacheRecord cache;

    cache.acctype = AccessType_DC;
    cache.cacheop = cacheop;
    cache.opscope = opscope;
    cache.cachetype = CacheType_Data;
    cache.security = SecurityStateAtEL(PSTATE.EL);

    if opscope == CacheOpScope_SetWay then
        cache.shareability = Shareability_NSH;
        (cache.setnum, cache.waynum, cache.level) = DecodeSW(ZeroExtend(regval, 64),
                                                             CacheType_Data);
        if (cacheop == CacheOp_Invalidate && PSTATE.EL == EL1 && EL2Enabled() &&
              ((!ELUsingAArch32(EL2) && (HCR_EL2.SWIO == '1' || HCR_EL2.<DC,VM> != '00')) ||
               (ELUsingAArch32(EL2) && (HCR.SWIO == '1' || HCR.<DC,VM> != '00')))) then
            cache.cacheop = CacheOp_CleanInvalidate;
        CACHE_OP(cache);
        return;

    if EL2Enabled() then
        if PSTATE.EL IN {EL0, EL1} then
            cache.is_vmid_valid = TRUE;
            cache.vmid = VMID[];
        else
            cache.is_vmid_valid = FALSE;
    else
        cache.is_vmid_valid = FALSE;

    if PSTATE.EL == EL0 then
        cache.is_asid_valid = TRUE;
        cache.asid = ASID[];
    else
        cache.is_asid_valid = FALSE;

    need_translate = DCInstNeedsTranslation(opscope);
    vaddress = regval;
    size = 0; // by default no watchpoint address
    if cacheop == CacheOp_Invalidate then
        size = integer IMPLEMENTATION_DEFINED "Data Cache Invalidate Watchpoint Size";
        assert size >= 4*(2^(UInt(CTR_EL0.DminLine))) && size <= 2048;
        assert UInt(size<32:0> AND (size-1)<32:0>) == 0; // size is power of 2
        vaddress = Align(regval, size);

    cache.translated = need_translate;
    cache.vaddress = ZeroExtend(vaddress, 64);

    if need_translate then
        boolean aligned = TRUE;
        AccessDescriptor accdesc = CreateAccDescDC(cache);
        AddressDescriptor memaddrdesc = AArch32.TranslateAddress(vaddress, accdesc, aligned,
                                                                 size);
        if IsFault(memaddrdesc) then
            AArch32.Abort(regval, memaddrdesc.fault);
        cache.paddress = memaddrdesc.paddress;
        if opscope == CacheOpScope_PoC then
            cache.shareability = memaddrdesc.memattrs.shareability;
        else
            cache.shareability = Shareability_NSH;
    else
        cache.shareability = Shareability UNKNOWN;
        cache.paddress = FullAddress UNKNOWN;

    if (cacheop == CacheOp_Invalidate && PSTATE.EL == EL1 && EL2Enabled() &&
          ((!ELUsingAArch32(EL2) && HCR_EL2.<DC,VM> != '00') ||
           (ELUsingAArch32(EL2) && HCR.<DC,VM> != '00'))) then
        cache.cacheop = CacheOp_CleanInvalidate;

    CACHE_OP(cache);
    return;

// AArch32.VCRMatch()
// ==================

boolean AArch32.VCRMatch(bits(32) vaddress)
    boolean match;

    if UsingAArch32() && ELUsingAArch32(EL1) && PSTATE.EL != EL2 then
        // Each bit position in this string corresponds to a bit in DBGVCR and an exception vector.
        match_word = Zeros(32);
        ss = CurrentSecurityState();

        if vaddress<31:5> == ExcVectorBase()<31:5> then
            if HaveEL(EL3) && ss == SS_NonSecure then
                match_word<UInt(vaddress<4:2>) + 24> = '1'; // Non-secure vectors
            else
                match_word<UInt(vaddress<4:2>) + 0> = '1';  // Secure vectors (or no EL3)

        if (HaveEL(EL3) && ELUsingAArch32(EL3) && vaddress<31:5> == MVBAR<31:5> &&
              ss == SS_Secure) then
            match_word<UInt(vaddress<4:2>) + 8> = '1'; // Monitor vectors

        // Mask out bits not corresponding to vectors.
        bits(32) mask;
        if !HaveEL(EL3) then
            mask = '00000000':'00000000':'00000000':'11011110'; // DBGVCR[31:8] are RES0
        elsif !ELUsingAArch32(EL3) then
            mask = '11011110':'00000000':'00000000':'11011110'; // DBGVCR[15:8] are RES0
        else
            mask = '11011110':'00000000':'11011100':'11011110';

        match_word = match_word AND DBGVCR AND mask;
        match = !IsZero(match_word);

        // Check for UNPREDICTABLE case - match on Prefetch Abort and Data Abort vectors
        if !IsZero(match_word<28:27,12:11,4:3>) && DebugTarget() == PSTATE.EL then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHDAPA);

        if !IsZero(vaddress<1:0>) && match then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);
    else
        match = FALSE;

    return match;

// AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
// ========================================================

boolean AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
    // The definition of this function is IMPLEMENTATION DEFINED.
    // In the recommended interface, AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled returns
    // the state of the (DBGEN AND SPIDEN) signal.
    if !HaveEL(EL3) && NonSecureOnlyImplementation() then return FALSE;
    return DBGEN == Signal_High && SPIDEN == Signal_High;

// AArch32.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch32 translation regime.

(boolean,boolean) AArch32.BreakpointMatch(integer n, bits(32) vaddress,
                                          AccessDescriptor accdesc, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n < NumBreakpointsImplemented();

    enabled = DBGBCR[n].E == '1';
    isbreakpnt = TRUE;
    linked = DBGBCR[n].BT IN {'0x01'};
    linked_to = FALSE;
    linked_n = UInt(DBGBCR[n].LBN);

    state_match = AArch32.StateMatch(DBGBCR[n].SSC, DBGBCR[n].HMC, DBGBCR[n].PMC,
                                     linked, linked_n, isbreakpnt, accdesc);
    (value_match, value_mismatch) = AArch32.BreakpointValueMatch(n, vaddress, linked_to);

    if size == 4 then // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        (match_i, mismatch_i) = AArch32.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if value_mismatch && !mismatch_i then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    if vaddress<1> == '1' && DBGBCR[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR[n]+2.
        if value_match then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if !value_mismatch then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    match = value_match && state_match && enabled;
    mismatch = value_mismatch && state_match && enabled;

    return (match, mismatch);

// AArch32.BreakpointValueMatch()
// ==============================
// The first result is whether an Address Match or Context breakpoint is programmed on the
// instruction at "address". The second result is whether an Address Mismatch breakpoint is
// programmed on the instruction, that is, whether the instruction should be stepped.

(boolean, boolean) AArch32.BreakpointValueMatch(integer n_in, bits(32) vaddress,
                                                boolean linked_to)
    // "n_in" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for
    //   Context matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.
    integer n = n_in;
    Constraint c;

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    if n >= NumBreakpointsImplemented() then
        (c, n) = ConstrainUnpredictableInteger(0, NumBreakpointsImplemented() - 1,
                                               Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE, FALSE);

    // If this breakpoint is not enabled, it cannot generate a match.
    // (This could also happen on a call from StateMatch for linking).
    if DBGBCR[n].E == '0' then return (FALSE, FALSE);

    dbgtype = DBGBCR[n].BT;
    (c, dbgtype) = AArch32.ReservedBreakpointType(n, dbgtype);
    if c == Constraint_DISABLED then return (FALSE, FALSE);
    // Otherwise the dbgtype value returned by AArch32.ReservedBreakpointType is valid.

    // Determine what to compare against.
    match_addr = (dbgtype IN {'0x0x'});
    mismatch = (dbgtype IN {'010x'});
    match_vmid = (dbgtype IN {'10xx'});
    match_cid1 = (dbgtype IN {'xx1x'});
    match_cid2 = (dbgtype IN {'11xx'});
    linking_enabled = (dbgtype IN {'xxx1'});

    // If called from StateMatch, it is CONSTRAINED UNPREDICTABLE if the
    // breakpoint is not programmed with linking enabled.
    if linked_to && !linking_enabled then
        if !ConstrainUnpredictableBool(Unpredictable_BPLINKINGDISABLED) then
            return (FALSE, FALSE);

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linking_enabled && !match_addr then return (FALSE, FALSE);

    boolean bvr_match = FALSE;
    boolean bxvr_match = FALSE;

    // Do the comparison.
    if match_addr then
        integer byte = UInt(vaddress<1:0>);
        assert byte IN {0,2}; // "vaddress" is halfword aligned
        boolean byte_select_match = (DBGBCR[n].BAS<byte> == '1');
        bvr_match = (vaddress<31:2> == DBGBVR[n]<31:2>) && byte_select_match;
    elsif match_cid1 then
        bvr_match = (PSTATE.EL != EL2 && CONTEXTIDR == DBGBVR[n]<31:0>);
    if match_vmid then
        bits(16) vmid;
        bits(16) bvr_vmid;
        if ELUsingAArch32(EL2) then
            vmid = ZeroExtend(VTTBR.VMID, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        elsif !IsFeatureImplemented(FEAT_VMID16) || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBXVR[n]<15:0>;
        bxvr_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && vmid == bvr_vmid);
    elsif match_cid2 then
        bxvr_match = (PSTATE.EL != EL3 && EL2Enabled() && !ELUsingAArch32(EL2) &&
                      DBGBXVR[n]<31:0> == CONTEXTIDR_EL2<31:0>);

    bvr_match_valid = (match_addr || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);

    match = (!bxvr_match_valid || bxvr_match) && (!bvr_match_valid || bvr_match);

    return (match && !mismatch, !match && mismatch);

// AArch32.ReservedBreakpointType()
// ================================
// Checks if the given DBGBCR<n>.BT value is reserved and will generate CONSTRAINED
// UNPREDICTABLE behavior, otherwise returns Constraint_NONE.

(Constraint, bits(4)) AArch32.ReservedBreakpointType(integer n, bits(4) bt_in)
    bits(4) bt = bt_in;
    boolean reserved = FALSE;
    context_aware = IsContextAwareBreakpoint(n);

    // Address mismatch
    if bt IN {'010x'} && HaltOnBreakpointOrWatchpoint() then reserved = TRUE;

    // Context matching
    if !(bt IN {'0x0x'}) && !context_aware then reserved = TRUE;

    // EL2 extension
    if bt IN {'1xxx'} && !HaveEL(EL2) then reserved = TRUE;

    // Context matching
    if (bt IN {'011x','11xx'} && !IsFeatureImplemented(FEAT_VHE) &&
          !IsFeatureImplemented(FEAT_Debugv8p2)) then
        reserved = TRUE;

    if reserved then
        Constraint c;
        (c, bt) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE, 4);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (c, bits(4) UNKNOWN);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    return (Constraint_NONE, bt);

// AArch32.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch32.StateMatch(bits(2) ssc_in, bit hmc_in, bits(2) pxc_in,
                           boolean linked_in, integer linked_n_in, boolean isbreakpnt,
                           AccessDescriptor accdesc)
    // "ssc_in", "hmc_in", "pxc_in" are the control fields from the DBGBCR[n] or DBGWCR[n]
    //   register.
    // "linked_in" is TRUE if this is a linked breakpoint/watchpoint type.
    // "linked_n_in" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "accdesc" describes the properties of the access being matched.

    bit hmc = hmc_in;
    bits(2) ssc = ssc_in;
    bits(2) pxc = pxc_in;
    boolean linked = linked_in;
    integer linked_n = linked_n_in;

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    Constraint c;
    // SSCE value discarded as there is no SSCE bit in AArch32.
    (c, ssc, -, hmc, pxc) = CheckValidStateMatch(ssc, '0', hmc, pxc, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the hmc,ssc,pxc values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    pl2_match = HaveEL(EL2) && ((hmc == '1' && (ssc:pxc != '1000')) || ssc == '11');
    pl1_match = pxc<0> == '1';
    pl0_match = pxc<1> == '1';
    ssu_match = isbreakpnt && hmc == '0' && pxc == '00' && ssc != '11';

    boolean priv_match;
    if ssu_match then
        priv_match = PSTATE.M IN {M32_User,M32_Svc,M32_System};
    else
        case accdesc.el of
            when EL3 priv_match = pl1_match; // EL3 and EL1 are both PL1
            when EL2 priv_match = pl2_match;
            when EL1 priv_match = pl1_match;
            when EL0 priv_match = pl0_match;

    // Security state match
    boolean ss_match;
    case ssc of
        when '00' ss_match = TRUE;                                    // Both
        when '01' ss_match = accdesc.ss == SS_NonSecure;              // Non-secure only
        when '10' ss_match = accdesc.ss == SS_Secure;                 // Secure only
        when '11' ss_match = (hmc == '1' || accdesc.ss == SS_Secure); // HMC=1 -> Both,
                                                                      // HMC=0 -> Secure only

    boolean linked_match = FALSE;

    if linked then
        // "linked_n" must be an enabled context-aware breakpoint unit.
        // If it is not context-aware then it is CONSTRAINED UNPREDICTABLE whether
        // this gives no match, gives a match without linking, or linked_n is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        if !IsContextAwareBreakpoint(linked_n) then
            (first_ctx_cmp, last_ctx_cmp) = ContextAwareBreakpointRange();
            (c, linked_n) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp,
                                                          Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED return FALSE; // Disabled
                when Constraint_NONE     linked = FALSE; // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint
        vaddress = bits(32) UNKNOWN;
        linked_to = TRUE;
        (linked_match,-) = AArch32.BreakpointValueMatch(linked_n, vaddress, linked_to);

    return priv_match && ss_match && (!linked || linked_match);

// AArch32.GenerateDebugExceptions()
// =================================

boolean AArch32.GenerateDebugExceptions()
    ss = CurrentSecurityState();
    return AArch32.GenerateDebugExceptionsFrom(PSTATE.EL, ss);

// AArch32.GenerateDebugExceptionsFrom()
// =====================================

boolean AArch32.GenerateDebugExceptionsFrom(bits(2) from_el, SecurityState from_state)
    if !ELUsingAArch32(DebugTargetFrom(from_state)) then
        mask = '0'; // No PSTATE.D in AArch32 state
        return AArch64.GenerateDebugExceptionsFrom(from_el, from_state, mask);

    if DBGOSLSR.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    boolean enabled;
    if HaveEL(EL3) && from_state == SS_Secure then
        assert from_el != EL2; // Secure EL2 always uses AArch64
        if IsSecureEL2Enabled() then
            // Implies that EL3 and EL2 both using AArch64
            enabled = MDCR_EL3.SDD == '0';
        else
            spd = if ELUsingAArch32(EL3) then SDCR.SPD else MDCR_EL3.SPD32;
            if spd<1> == '1' then
                enabled = spd<0> == '1';
            else
                // SPD == 0b01 is reserved, but behaves the same as 0b00.
                enabled = AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled();
        if from_el == EL0 then enabled = enabled || SDER.SUIDEN == '1';
    else
        enabled = from_el != EL2;

    return enabled;

// AArch32.IncrementCycleCounter()
// ===============================
// Increment the cycle counter and possibly set overflow bits.

AArch32.IncrementCycleCounter()
    if !CountPMUEvents(CYCLE_COUNTER_ID) then return;

    bit d = PMCR.D; // Check divide-by-64
    bit lc = PMCR.LC;

    // Effective value of 'D' bit is 0 when Effective value of LC is '1'
    if lc == '1' then d = '0';

    if d == '1' && !HasElapsed64Cycles() then return;

    integer old_value = UInt(PMCCNTR);
    integer new_value = old_value + 1;
    PMCCNTR = new_value<63:0>;

    constant integer ovflw = if lc == '1' then 64 else 32;

    if old_value<64:ovflw> != new_value<64:ovflw> then
        PMOVSSET.C = '1';
        PMOVSR.C = '1';

    return;

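The overflow test old_value<64:ovflw> != new_value<64:ovflw> asks whether any bit at or above
the overflow position changed; for an increment of one, that happens exactly when the low
ovflw bits wrap to zero. A minimal C sketch with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper illustrating the overflow test above for a +1 step. */
static bool cycle_counter_overflowed(uint64_t old_value, bool long_cycle)
{
    uint64_t new_value = old_value + 1;  /* wraps modulo 2^64 */
    if (long_cycle)                      /* ovflw == 64: only bit 64 can change */
        return new_value < old_value;    /* i.e. the 64-bit value wrapped */
    return (old_value >> 32) != (new_value >> 32); /* ovflw == 32 */
}
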
// AArch32.IncrementEventCounter()
// ===============================
// Increment the specified event counter 'idx' by the specified amount 'increment'.
// 'Vm' is the value event counter 'idx-1' is being incremented by if 'idx' is odd,
// zero otherwise.
// Returns the amount the counter was incremented by.

integer AArch32.IncrementEventCounter(integer idx, integer increment_in, integer Vm)
    if HaveAArch64() then
        // Force the counter to be incremented as a 64-bit counter.
        return AArch64.IncrementEventCounter(idx, increment_in, Vm);

    // In this model, event counters in an AArch32-only implementation are 32 bits and
    // the LP bits are RES0, even if FEAT_PMUv3p5 is implemented.
    integer old_value;
    integer new_value;

    old_value = UInt(PMEVCNTR[idx]);
    integer increment = PMUCountValue(idx, increment_in, Vm);
    new_value = old_value + increment;
    PMEVCNTR[idx] = new_value<31:0>;

    constant integer ovflw = 32;

    if old_value<64:ovflw> != new_value<64:ovflw> then
        PMOVSSET<idx> = '1';
        PMOVSR<idx> = '1';

        // Check for the CHAIN event from an even counter
        if idx<0> == '0' && idx + 1 < GetNumEventCounters() then
            PMUEvent(PMU_EVENT_CHAIN, 1, idx + 1);

    return increment;

// AArch32.PMUCycle()
// ==================
// Called at the end of each cycle to increment event counters and
// check for PMU overflow. In pseudocode, a cycle ends after the
// execution of the operational pseudocode.

AArch32.PMUCycle()
    if HaveAArch64() then
        AArch64.PMUCycle();
        return;

    if !IsFeatureImplemented(FEAT_PMUv3) then
        return;

    PMUEvent(PMU_EVENT_CPU_CYCLES);
    integer counters = GetNumEventCounters();
    integer Vm = 0;

    if counters != 0 then
        for idx = 0 to counters - 1
            if CountPMUEvents(idx) then
                integer accumulated = PMUEventAccumulator[idx];
                if (idx MOD 2) == 0 then Vm = 0;
                Vm = AArch32.IncrementEventCounter(idx, accumulated, Vm);
            PMUEventAccumulator[idx] = 0;

    AArch32.IncrementCycleCounter();
    CheckForPMUOverflow();

// AArch32.EnterHypModeInDebugState()
// ==================================
// Take an exception in Debug state to Hyp mode.

AArch32.EnterHypModeInDebugState(ExceptionRecord except)
    SynchronizeContext();
    assert HaveEL(EL2) && CurrentSecurityState() == SS_NonSecure && ELUsingAArch32(EL2);

    AArch32.ReportHypEntry(except);
    AArch32.WriteMode(M32_Hyp);
    SPSR_curr[] = bits(32) UNKNOWN;
    ELR_hyp = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1'; // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    if IsFeatureImplemented(FEAT_Debugv8p9) then
        DSPSR2 = bits(32) UNKNOWN;
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_SSBS) then
        PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();

    EndOfInstruction();

// AArch32.EnterModeInDebugState()
// ===============================
// Take an exception in Debug state to a mode other than Monitor and Hyp mode.

AArch32.EnterModeInDebugState(bits(5) target_mode)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR_curr[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1'; // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    if IsFeatureImplemented(FEAT_Debugv8p9) then
        DSPSR2 = bits(32) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_PAN) && SCTLR.SPAN == '0' then
        PSTATE.PAN = '1';
    if IsFeatureImplemented(FEAT_SSBS) then
        PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields(); // Update EDSCR processor state flags.

    EndOfInstruction();

// AArch32.EnterMonitorModeInDebugState()
// ======================================
// Take an exception in Debug state to Monitor mode.

AArch32.EnterMonitorModeInDebugState()
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = CurrentSecurityState() == SS_Secure;

    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR_curr[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1'; // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_PAN) then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if IsFeatureImplemented(FEAT_SSBS) then
        PSTATE.SSBS = bit UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    if IsFeatureImplemented(FEAT_Debugv8p9) then
        DSPSR2 = bits(32) UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields(); // Update EDSCR processor state flags.

    EndOfInstruction();

// AArch32.WatchpointByteMatch()
// =============================

boolean AArch32.WatchpointByteMatch(integer n, bits(32) vaddress)
    constant integer dbgtop = 31;
    constant integer cmpbottom = if DBGWVR[n]<2> == '1' then 2 else 3; // Word or doubleword
    integer bottom = cmpbottom;
    constant integer select = UInt(vaddress<cmpbottom-1:0>);
    byte_select_match = (DBGWCR[n].BAS<select> != '0');

    mask = UInt(DBGWCR[n].MASK);

    // If DBGWCR[n].MASK is a nonzero value and DBGWCR[n].BAS is not set to '11111111', or
    // DBGWCR[n].BAS specifies a non-contiguous set of bytes behavior is CONSTRAINED
    // UNPREDICTABLE.
    if mask > 0 && !IsOnes(DBGWCR[n].BAS) then
        byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
    else
        LSB = (DBGWCR[n].BAS AND NOT(DBGWCR[n].BAS - 1));
        MSB = (DBGWCR[n].BAS + LSB);
        if !IsZero(MSB AND (MSB - 1)) then // Not contiguous
            byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
            bottom = 3; // For the whole doubleword

    // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
    if mask > 0 && mask <= 2 then
        Constraint c;
        (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
        assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
        case c of
            when Constraint_DISABLED return FALSE; // Disabled
            when Constraint_NONE     mask = 0;     // No masking
            // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved
            // value

    constant integer cmpmsb = dbgtop;
    constant integer cmplsb = if mask > bottom then mask else bottom;
    constant integer bottombit = bottom;
    boolean WVR_match = (vaddress<cmpmsb:cmplsb> == DBGWVR[n]<cmpmsb:cmplsb>);
    if mask > bottom then
        // If masked bits of DBGWVR[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
        if WVR_match && !IsZero(DBGWVR[n]<cmplsb-1:bottombit>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);

    return (WVR_match && byte_select_match);

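The contiguity test on DBGWCR[n].BAS uses a standard bit trick: BAS AND NOT(BAS - 1) isolates
the lowest set bit, and adding that to BAS yields a power of two exactly when the set bits
form one contiguous run. A small C sketch with a hypothetical name:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the LSB/MSB contiguity test above.
 * The 8-bit wraparound matches the bits(8) arithmetic in the pseudocode,
 * so BAS == 0xFF (and BAS == 0) also count as contiguous. */
static bool bas_is_contiguous(uint8_t bas)
{
    uint8_t lsb = bas & (uint8_t)-bas;      /* lowest set bit of BAS */
    uint8_t msb = (uint8_t)(bas + lsb);     /* carries out of a contiguous run */
    return (uint8_t)(msb & (msb - 1)) == 0; /* power of two (or zero) */
}

For example, BAS = 0b00001100 gives msb = 0b00010000 (contiguous), while BAS = 0b00001010
gives msb = 0b00001100, which is not a power of two (non-contiguous).
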
// AArch32.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch32 translation regime.

boolean AArch32.WatchpointMatch(integer n, bits(32) vaddress, integer size,
                                AccessDescriptor accdesc)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n < NumWatchpointsImplemented();

    boolean enabled = DBGWCR[n].E == '1';
    linked = DBGWCR[n].WT == '1';
    isbreakpnt = FALSE;
    linked_n = UInt(DBGWCR[n].LBN);

    state_match = AArch32.StateMatch(DBGWCR[n].SSC, DBGWCR[n].HMC, DBGWCR[n].PAC,
                                     linked, linked_n, isbreakpnt, accdesc);

    boolean ls_match;
    case DBGWCR[n].LSC<1:0> of
        when '00' ls_match = FALSE;
        when '01' ls_match = accdesc.read;
        when '10' ls_match = accdesc.write || accdesc.acctype == AccessType_DC;
        when '11' ls_match = TRUE;

    boolean value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch32.WatchpointByteMatch(n, vaddress + byte);

    return value_match && state_match && ls_match && enabled;

// AArch32.Abort()
// ===============
// Abort and Debug exception handling in an AArch32 translation regime.

AArch32.Abort(bits(32) vaddress, FaultRecord fault)
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                            (IsFeatureImplemented(FEAT_RAS) && HCR_EL2.TEA == '1' &&
                             IsExternalAbort(fault)) ||
                            (IsDebugException(fault) && MDCR_EL2.TDE == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = EffectiveEA() == '1' && IsExternalAbort(fault);

    if route_to_aarch64 then
        AArch64.Abort(ZeroExtend(vaddress, 64), fault);
    elsif fault.accessdesc.acctype == AccessType_IFETCH then
        AArch32.TakePrefetchAbortException(vaddress, fault);
    else
        AArch32.TakeDataAbortException(vaddress, fault);

// AArch32.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort exceptions taken to Hyp mode
// from an AArch32 translation regime.

ExceptionRecord AArch32.AbortSyndrome(Exception exceptype, FaultRecord fault,
                                      bits(32) vaddress, bits(2) target_el)
    except = ExceptionSyndrome(exceptype);
    except.syndrome = AArch32.FaultSyndrome(exceptype, fault);
    except.vaddress = ZeroExtend(vaddress, 64);
    if IPAValid(fault) then
        except.ipavalid = TRUE;
        except.NS = if fault.ipaddress.paspace == PAS_NonSecure then '1' else '0';
        except.ipaddress = ZeroExtend(fault.ipaddress.address, 56);
    else
        except.ipavalid = FALSE;

    return except;

// AArch32.CheckPCAlignment()
// ==========================

AArch32.CheckPCAlignment()
    bits(32) pc = ThisInstrAddr(32);

    if (CurrentInstrSet() == InstrSet_A32 && pc<1> == '1') || pc<0> == '1' then
        if AArch32.GeneralExceptionsToAArch64() then AArch64.PCAlignmentFault();

        AccessDescriptor accdesc = CreateAccDescIFetch();
        FaultRecord fault = NoFault(accdesc);

        // Generate an Alignment fault Prefetch Abort exception
        fault.statuscode = Fault_Alignment;
        AArch32.Abort(pc, fault);

// AArch32.CommonFaultStatus()
// ===========================
// Return the common part of the fault status on reporting a Data
// or Prefetch Abort.

bits(32) AArch32.CommonFaultStatus(FaultRecord fault, boolean long_format)
    bits(32) target = Zeros(32);

    if IsFeatureImplemented(FEAT_RAS) && IsAsyncAbort(fault) then
        ErrorState errstate = PEErrorState(fault);
        target<15:14> = AArch32.EncodeAsyncErrorSyndrome(errstate); // AET

    if IsExternalAbort(fault) then target<12> = fault.extflag; // ExT
    target<9> = if long_format then '1' else '0';              // LPAE

    if long_format then // Long-descriptor format
        target<5:0> = EncodeLDFSC(fault.statuscode, fault.level);    // STATUS
    else                // Short-descriptor format
        target<10,3:0> = EncodeSDFSC(fault.statuscode, fault.level); // FS

    return target;

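As a sketch of this bit packing, the C below uses a hypothetical helper name; the positions
come straight from the comments above (AET in [15:14], ExT in [12], LPAE in [9], with the
status code in [5:0] for the Long-descriptor format or split across {[10],[3:0]} for the
Short-descriptor format).

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring AArch32.CommonFaultStatus's packing.
 * 'status' is the already-encoded fault status code: 6 bits (LDFSC) in the
 * long format, 5 bits (SDFSC) in the short format. */
static uint32_t pack_common_fsr(uint32_t aet, uint32_t ext, bool long_format,
                                uint32_t status)
{
    uint32_t fsr = ((aet & 3u) << 14) | ((ext & 1u) << 12);
    if (long_format)
        fsr |= (1u << 9) | (status & 0x3Fu);                    /* LPAE=1, STATUS<5:0> */
    else
        fsr |= (((status >> 4) & 1u) << 10) | (status & 0xFu);  /* FS = {bit 10, bits 3:0} */
    return fsr;
}
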
// AArch32.ReportDataAbort()
// =========================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportDataAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    boolean long_format;
    if route_to_monitor && CurrentSecurityState() != SS_Secure then
        long_format = ((TTBCR_S.EAE == '1') ||
                       (IsExternalSyncAbort(fault) &&
                        ((PSTATE.EL == EL2 || TTBCR.EAE == '1') ||
                         (fault.secondstage &&
                          (boolean IMPLEMENTATION_DEFINED
                           "Report abort using Long-descriptor format")))));
    else
        long_format = TTBCR.EAE == '1';

    bits(32) syndrome = AArch32.CommonFaultStatus(fault, long_format);

    // bits of syndrome that are not common to I and D side
    if fault.accessdesc.acctype IN {AccessType_DC, AccessType_IC, AccessType_AT} then
        syndrome<13> = '1'; // CM
        syndrome<11> = '1'; // WnR
    else
        syndrome<11> = if fault.write then '1' else '0'; // WnR

    if !long_format then
        syndrome<7:4> = fault.domain; // Domain

    if fault.accessdesc.acctype == AccessType_IC then
        bits(32) i_syndrome;
        if (!long_format &&
              boolean IMPLEMENTATION_DEFINED "Report I-cache maintenance fault in IFSR") then
            i_syndrome = syndrome;
            syndrome<10,3:0> = EncodeSDFSC(Fault_ICacheMaint, 1);
        else
            i_syndrome = bits(32) UNKNOWN;
        if route_to_monitor then
            IFSR_S = i_syndrome;
        else
            IFSR = i_syndrome;

    if route_to_monitor then
        DFSR_S = syndrome;
        DFAR_S = vaddress;
    else
        DFSR = syndrome;
        DFAR = vaddress;

    return;

// AArch32.ReportPrefetchAbort()
// =============================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportPrefetchAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR can be Long-descriptor format or Short-descriptor format.
    // Normally, the current translation table format determines the format. For an abort from
    // Non-secure state to Monitor mode, the IFSR uses the Long-descriptor format if any of the
    // following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * It is taken from Hyp mode.
    // * It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && CurrentSecurityState() != SS_Secure then
        long_format = TTBCR_S.EAE == '1' || PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';

    bits(32) fsr = AArch32.CommonFaultStatus(fault, long_format);

    if route_to_monitor then
        IFSR_S = fsr;
        IFAR_S = vaddress;
    else
        IFSR = fsr;
        IFAR = vaddress;

    return;

// AArch32.TakeDataAbortException()
// ================================

AArch32.TakeDataAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && EffectiveEA() == '1' && IsExternalAbort(fault);
    route_to_hyp = (EL2Enabled() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' ||
                     (IsFeatureImplemented(FEAT_RAS) && HCR2.TEA == '1' &&
                      IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1') ||
                     IsSecondStage(fault)));
    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x10;
    lr_offset = 8;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        except = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress, EL2);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(except, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakePrefetchAbortException()
// ====================================

AArch32.TakePrefetchAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && EffectiveEA() == '1' && IsExternalAbort(fault);
    route_to_hyp = (EL2Enabled() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' ||
                     (IsFeatureImplemented(FEAT_RAS) && HCR2.TEA == '1' &&
                      IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1') ||
                     IsSecondStage(fault)));
    ExceptionRecord except;
    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x0C;
    lr_offset = 4;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        if fault.statuscode == Fault_Alignment then // PC Alignment fault
            except = ExceptionSyndrome(Exception_PCAlignment);
            except.vaddress = ThisInstrAddr(64);
        else
            except = AArch32.AbortSyndrome(Exception_InstructionAbort, fault, vaddress, EL2);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(except, preferred_exception_return, 0x14);
    else
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakePhysicalFIQException()
// ==================================

AArch32.TakePhysicalFIQException()
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.FMO == '1' && !IsInHost());

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.FIQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalFIQException();

    route_to_monitor = HaveEL(EL3) && SCR.FIQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.FMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x1C;
    lr_offset = 4;

    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        except = ExceptionSyndrome(Exception_FIQ);
        AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch32.TakePhysicalIRQException()
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.IMO == '1' && !IsInHost());

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.IRQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalIRQException();

    route_to_monitor = HaveEL(EL3) && SCR.IRQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.IMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x18;
    lr_offset = 4;

    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        except = ExceptionSyndrome(Exception_IRQ);
        AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakePhysicalSErrorException()
// =====================================

AArch32.TakePhysicalSErrorException(boolean implicit_esb)
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = EffectiveEA() == '1';

    if route_to_aarch64 then AArch64.TakePhysicalSErrorException(implicit_esb);

    route_to_monitor = HaveEL(EL3) && SCR.EA == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.AMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x10;
    lr_offset = 8;

    bits(2) target_el;
    if route_to_monitor then
        target_el = EL3;
    elsif PSTATE.EL == EL2 || route_to_hyp then
        target_el = EL2;
    else
        target_el = EL1;

    FaultRecord fault = GetPendingPhysicalSError();
    vaddress = bits(32) UNKNOWN;
    except = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress, target_el);

    if IsSErrorEdgeTriggered() then ClearPendingPhysicalSError();

    case target_el of
        when EL3
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
        when EL2
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(except, preferred_exception_return, 0x14);
        when EL1
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
        otherwise
            Unreachable();

// AArch32.TakeVirtualFIQException()
// =================================

AArch32.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then // Virtual FIQs enabled if TGE==0 and FMO==1
        assert HCR.TGE == '0' && HCR.FMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualFIQException();

    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x1C;
    lr_offset = 4;

    AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakeVirtualIRQException()
// =================================

AArch32.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then // Virtual IRQs enabled if TGE==0 and IMO==1
        assert HCR.TGE == '0' && HCR.IMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualIRQException();

    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x18;
    lr_offset = 4;

    AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakeVirtualSErrorException()
// ====================================

AArch32.TakeVirtualSErrorException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then // Virtual SError enabled if TGE==0 and AMO==1
        assert HCR.TGE == '0' && HCR.AMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualSErrorException();

    route_to_monitor = FALSE;

    bits(32) preferred_exception_return = ThisInstrAddr(32);
    vect_offset = 0x10;
    lr_offset = 8;

    vaddress = bits(32) UNKNOWN;
    parity = FALSE;
    Fault fault = Fault_AsyncExternal;
    integer level = integer UNKNOWN;

    bits(32) fsr = Zeros(32);
    if IsFeatureImplemented(FEAT_RAS) then
        if ELUsingAArch32(EL2) then
            fsr<15:14> = VDFSR.AET;
            fsr<12> = VDFSR.ExT;
        else
            fsr<15:14> = VSESR_EL2.AET;
            fsr<12> = VSESR_EL2.ExT;
    else
        fsr<12> = bit IMPLEMENTATION_DEFINED "Virtual External abort type";

    if TTBCR.EAE == '1' then // Long-descriptor format
        fsr<9> = '1';
        fsr<5:0> = EncodeLDFSC(fault, level);
    else                     // Short-descriptor format
        fsr<9> = '0';
        fsr<10,3:0> = EncodeSDFSC(fault, level);

    DFSR = fsr;
    DFAR = bits(32) UNKNOWN;

    ClearPendingVirtualSError();
    AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);

// AArch32.SoftwareBreakpoint()
// ============================

AArch32.SoftwareBreakpoint(bits(16) immediate)
    if (EL2Enabled() && !ELUsingAArch32(EL2) &&
          (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1')) || !ELUsingAArch32(EL1) then
        AArch64.SoftwareBreakpoint(immediate);

    accdesc = CreateAccDescIFetch();
    fault = NoFault(accdesc);
    vaddress = bits(32) UNKNOWN;

    fault.statuscode = Fault_Debug;
    fault.debugmoe = DebugException_BKPT;

    AArch32.Abort(vaddress, fault);

constant bits(4) DebugException_Breakpoint  = '0001';
constant bits(4) DebugException_BKPT        = '0011';
constant bits(4) DebugException_VectorCatch = '0101';
constant bits(4) DebugException_Watchpoint  = '1010';

// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.

AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)
    if PSTATE.EL == EL1 && EL2Enabled() then
        tid0 = if ELUsingAArch32(EL2) then HCR.TID0 else HCR_EL2.TID0;
        tid3 = if ELUsingAArch32(EL2) then HCR.TID3 else HCR_EL2.TID3;

        if ((tid0 == '1' && reg == '0000') ||                       // FPSID
              (tid3 == '1' && reg IN {'0101', '0110', '0111'})) then // MVFRx
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x8);
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x8);

// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR.

(integer,bit) AArch32.ExceptionClass(Exception exceptype)
    il_is_valid = TRUE;
    integer ec;

    case exceptype of
        when Exception_Uncategorized       ec = 0x00; il_is_valid = FALSE;
        when Exception_WFxTrap             ec = 0x01;
        when Exception_CP15RTTrap          ec = 0x03;
        when Exception_CP15RRTTrap         ec = 0x04;
        when Exception_CP14RTTrap          ec = 0x05;
        when Exception_CP14DTTrap          ec = 0x06;
        when Exception_AdvSIMDFPAccessTrap ec = 0x07;
        when Exception_FPIDTrap            ec = 0x08;
        when Exception_PACTrap             ec = 0x09;
        when Exception_TSTARTAccessTrap    ec = 0x1B;
        when Exception_GPC                 ec = 0x1E;
        when Exception_CP14RRTTrap         ec = 0x0C;
        when Exception_BranchTarget        ec = 0x0D;
        when Exception_IllegalState        ec = 0x0E; il_is_valid = FALSE;
        when Exception_SupervisorCall      ec = 0x11;
        when Exception_HypervisorCall      ec = 0x12;
        when Exception_MonitorCall         ec = 0x13;
        when Exception_InstructionAbort    ec = 0x20; il_is_valid = FALSE;
        when Exception_PCAlignment         ec = 0x22; il_is_valid = FALSE;
        when Exception_DataAbort           ec = 0x24;
        when Exception_NV2DataAbort        ec = 0x25;
        when Exception_FPTrappedException  ec = 0x28;
        when Exception_PMU                 ec = 0x3D;
        otherwise                          Unreachable();

    if ec IN {0x20,0x24} && PSTATE.EL == EL2 then
        ec = ec + 1;

    bit il;
    if il_is_valid then
        il = if ThisInstrLength() == 32 then '1' else '0';
    else
        il = '1';

    return (ec,il);

// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.

boolean AArch32.GeneralExceptionsToAArch64()
    return ((PSTATE.EL == EL0 && !ELUsingAArch32(EL1)) ||
            (EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1'));

// AArch32.ReportHypEntry()
// ========================
// Report syndrome information to Hyp mode registers.

AArch32.ReportHypEntry(ExceptionRecord except)
    Exception exceptype = except.exceptype;

    (ec,il) = AArch32.ExceptionClass(exceptype);
    iss = except.syndrome;
    iss2 = except.syndrome2;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then
        il = '1';

    HSR = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment} then
        HIFAR = except.vaddress<31:0>;
        HDFAR = bits(32) UNKNOWN;
    elsif exceptype == Exception_DataAbort then
        HIFAR = bits(32) UNKNOWN;
        HDFAR = except.vaddress<31:0>;

    if except.ipavalid then
        HPFAR<31:4> = except.ipaddress<39:12>;
    else
        HPFAR<31:4> = bits(28) UNKNOWN;

    return;

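The concatenation HSR = ec<5:0>:il:iss places the 6-bit EC in bits [31:26], IL in bit [25],
and the 25-bit ISS in bits [24:0]. A one-line C sketch with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper mirroring HSR = ec<5:0>:il:iss above. */
static uint32_t pack_hsr(uint32_t ec, uint32_t il, uint32_t iss)
{
    return ((ec & 0x3Fu) << 26) | ((il & 1u) << 25) | (iss & 0x1FFFFFFu);
}
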
// AArch32.ResetControlRegisters()
// ===============================
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.

AArch32.ResetControlRegisters(boolean cold_reset);

// AArch32.TakeReset()
// ===================
// Reset into AArch32 state

AArch32.TakeReset(boolean cold_reset)
    assert !HaveAArch64();

    // Enter the highest implemented Exception level in AArch32 state
    if HaveEL(EL3) then
        AArch32.WriteMode(M32_Svc);
        SCR.NS = '0'; // Secure state
    elsif HaveEL(EL2) then
        AArch32.WriteMode(M32_Hyp);
    else
        AArch32.WriteMode(M32_Svc);

    // Reset System registers in the coproc=0b111x encoding space
    // and other system components
    AArch32.ResetControlRegisters(cold_reset);
    FPEXC.EN = '0';

    // Reset all other PSTATE fields, including instruction set and endianness according to the
    // SCTLR values produced by the above call to ResetControlRegisters()
    PSTATE.<A,I,F> = '111'; // All asynchronous exceptions masked
    PSTATE.IT = '00000000'; // IT block state reset
    if HaveEL(EL2) && !HaveEL(EL3) then
        PSTATE.T = HSCTLR.TE; // Instruction set: TE=0:A32, TE=1:T32. PSTATE.J is RES0.
        PSTATE.E = HSCTLR.EE; // Endianness: EE=0: little-endian, EE=1: big-endian.
    else
        PSTATE.T = SCTLR.TE; // Instruction set: TE=0:A32, TE=1:T32. PSTATE.J is RES0.
        PSTATE.E = SCTLR.EE; // Endianness: EE=0: little-endian, EE=1: big-endian.
    PSTATE.IL = '0'; // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // R14 or ELR_hyp and SPSR have UNKNOWN values, so that it
    // is impossible to return from a reset in an architecturally defined way.
    AArch32.ResetGeneralRegisters();
    if IsFeatureImplemented(FEAT_SME) || IsFeatureImplemented(FEAT_SVE) then
        ResetSVERegisters();
    else
        AArch32.ResetSIMDFPRegisters();
    AArch32.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(32) rv; // IMPLEMENTATION DEFINED reset vector

    if HaveEL(EL3) then
        if MVBAR<0> == '1' then // Reset vector in MVBAR
            rv = MVBAR<31:1>:'0';
        else
            rv = bits(32) IMPLEMENTATION_DEFINED "reset vector address";
    else
        rv = RVBAR<31:1>:'0';

    // The reset vector must be correctly aligned
    assert rv<0> == '0' && (PSTATE.T == '1' || rv<1> == '0');

    boolean branch_conditional = FALSE;
    EDPRSR.R = '0'; // Leaving Reset State.
    BranchTo(rv, BranchType_RESET, branch_conditional);

// ExcVectorBase()
// ===============

bits(32) ExcVectorBase()
    if SCTLR.V == '1' then // Hivecs selected, base = 0xFFFF0000
        return Ones(16):Zeros(16);
    else
        return VBAR<31:5>:Zeros(5);

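In other words, the base is 0xFFFF0000 when hivecs is selected, and otherwise VBAR with its
low five bits forced to zero. A one-line C sketch with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper mirroring ExcVectorBase: Ones(16):Zeros(16) is
 * 0xFFFF0000, and VBAR<31:5>:Zeros(5) clears the low 5 bits of VBAR. */
static uint32_t exc_vector_base(int sctlr_v, uint32_t vbar)
{
    return sctlr_v ? 0xFFFF0000u : (vbar & ~0x1Fu);
}
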
// AArch32.FPTrappedException()
// ============================

AArch32.FPTrappedException(bits(8) accumulated_exceptions)
    if AArch32.GeneralExceptionsToAArch64() then
        is_ase = FALSE;
        element = 0;
        AArch64.FPTrappedException(is_ase, accumulated_exceptions);
    FPEXC.DEX = '1';
    FPEXC.TFV = '1';
    FPEXC<7,4:0> = accumulated_exceptions<7,4:0>; // IDF,IXF,UFF,OFF,DZF,IOF
    FPEXC<10:8> = '111'; // VECITR is RES1

    AArch32.TakeUndefInstrException();

// AArch32.CallHypervisor()
// ========================
// Performs an HVC call

AArch32.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if !ELUsingAArch32(EL2) then
        AArch64.CallHypervisor(immediate);
    else
        AArch32.TakeHVCException(immediate);

// AArch32.CallSupervisor()
// ========================
// Calls the Supervisor

AArch32.CallSupervisor(bits(16) immediate_in)
    bits(16) immediate = immediate_in;

    if AArch32.CurrentCond() != '1110' then
        immediate = bits(16) UNKNOWN;

    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CallSupervisor(immediate);
    else
        AArch32.TakeSVCException(immediate);

// AArch32.TakeHVCException()
// ==========================

AArch32.TakeHVCException(bits(16) immediate)
    assert HaveEL(EL2) && ELUsingAArch32(EL2);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr(32);
    vect_offset = 0x08;

    except = ExceptionSyndrome(Exception_HypervisorCall);
    except.syndrome<15:0> = immediate;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
    else
        AArch32.EnterHypMode(except, preferred_exception_return, 0x14);

// AArch32.TakeSMCException()
// ==========================

AArch32.TakeSMCException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    AArch32.ITAdvance();
    HSAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr(32);
    vect_offset = 0x08;
    lr_offset = 0;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakeSVCException()
// ==========================

AArch32.TakeSVCException(bits(16) immediate)
    AArch32.ITAdvance();
    SSAdvance();
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = NextInstrAddr(32);
    vect_offset = 0x08;
    lr_offset = 0;

    if PSTATE.EL == EL2 || route_to_hyp then
        except = ExceptionSyndrome(Exception_SupervisorCall);
        except.syndrome<15:0> = immediate;
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(except, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Svc, preferred_exception_return, lr_offset, vect_offset);

// AArch32.EnterHypMode()
// ======================
// Take an exception to Hyp mode.

AArch32.EnterHypMode(ExceptionRecord except, bits(32) preferred_exception_return,
                     integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL2) && CurrentSecurityState() == SS_NonSecure && ELUsingAArch32(EL2);

    if Halted() then
        AArch32.EnterHypModeInDebugState(except);
        return;

    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState, 32);
    if !(except.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch32.ReportHypEntry(except);
    AArch32.WriteMode(M32_Hyp);
    SPSR_curr[] = spsr;
    ELR_hyp = preferred_exception_return;
    PSTATE.T = HSCTLR.TE; // PSTATE.J is RES0
    PSTATE.SS = '0';
    if !HaveEL(EL3) || SCR_curr[].EA == '0' then PSTATE.A = '1';
    if !HaveEL(EL3) || SCR_curr[].IRQ == '0' then PSTATE.I = '1';
    if !HaveEL(EL3) || SCR_curr[].FIQ == '0' then PSTATE.F = '1';
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_SSBS) then PSTATE.SSBS = HSCTLR.DSSBS;

    boolean branch_conditional = FALSE;
    BranchTo(HVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional);

    CheckExceptionCatch(TRUE); // Check for debug event on exception entry

    EndOfInstruction();

// AArch32.EnterMode()
// ===================
// Take an exception to a mode other than Monitor and Hyp mode.

AArch32.EnterMode(bits(5) target_mode, bits(32) preferred_exception_return,
                  integer lr_offset, integer vect_offset)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    if Halted() then
        AArch32.EnterModeInDebugState(target_mode);
        return;

    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState, 32);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR_curr[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE; // PSTATE.J is RES0
    PSTATE.SS = '0';
    if target_mode == M32_FIQ then
        PSTATE.<A,I,F> = '111';
    elsif target_mode IN {M32_Abort, M32_IRQ} then
        PSTATE.<A,I> = '11';
    else
        PSTATE.I = '1';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_PAN) && SCTLR.SPAN == '0' then PSTATE.PAN = '1';
    if IsFeatureImplemented(FEAT_SSBS) then PSTATE.SSBS = SCTLR.DSSBS;

    boolean branch_conditional = FALSE;
    BranchTo(ExcVectorBase()<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional);

    CheckExceptionCatch(TRUE); // Check for debug event on exception entry

    EndOfInstruction();

// AArch32.EnterMonitorMode()
// ==========================
// Take an exception to Monitor mode.

AArch32.EnterMonitorMode(bits(32) preferred_exception_return, integer lr_offset,
                         integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = CurrentSecurityState() == SS_Secure;

    if Halted() then
        AArch32.EnterMonitorModeInDebugState();
        return;

    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState, 32);
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR_curr[] = spsr;
    R[14] = preferred_exception_return + lr_offset;
    PSTATE.T = SCTLR.TE; // PSTATE.J is RES0
    PSTATE.SS = '0';
    PSTATE.<A,I,F> = '111';
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if IsFeatureImplemented(FEAT_PAN) then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if IsFeatureImplemented(FEAT_SSBS) then PSTATE.SSBS = SCTLR.DSSBS;

    boolean branch_conditional = FALSE;
    BranchTo(MVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional);

    CheckExceptionCatch(TRUE); // Check for debug event on exception entry

    EndOfInstruction();

// AArch32.CheckAdvSIMDOrFPEnabled() // ================================= // Check against CPACR, FPEXC, HCPTR, NSACR, and CPTR_EL3. AArch32.CheckAdvSIMDOrFPEnabled(boolean fpexc_check_in, boolean advsimd) boolean fpexc_check = fpexc_check_in; if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then // When executing at EL0 using AArch32, if EL1 is using AArch64 then the Effective value of // FPEXC.EN is 1. This includes when EL2 is using AArch64 and enabled in the current // Security state, HCR_EL2.TGE is 1, and the Effective value of HCR_EL2.RW is 1. AArch64.CheckFPAdvSIMDEnabled(); else cpacr_asedis = CPACR.ASEDIS; cpacr_cp10 = CPACR.cp10; if HaveEL(EL3) && ELUsingAArch32(EL3) && CurrentSecurityState() == SS_NonSecure then // Check if access disabled in NSACR if NSACR.NSASEDIS == '1' then cpacr_asedis = '1'; if NSACR.cp10 == '0' then cpacr_cp10 = '00'; if PSTATE.EL != EL2 then // Check if Advanced SIMD disabled in CPACR if advsimd && cpacr_asedis == '1' then AArch32.Undefined(); // Check if access disabled in CPACR boolean disabled; case cpacr_cp10 of when '00' disabled = TRUE; when '01' disabled = PSTATE.EL == EL0; when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR); when '11' disabled = FALSE; if disabled then AArch32.Undefined(); // If required, check FPEXC enabled bit. if (fpexc_check && PSTATE.EL == EL0 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1') then // When executing at EL0 using AArch32, if EL2 is using AArch64 and enabled in the // current Security state, HCR_EL2.TGE is 1, and the Effective value of HCR_EL2.RW is 0 // then it is IMPLEMENTATION DEFINED whether the Effective value of FPEXC.EN is 1 // or the value of FPEXC32_EL2.EN. fpexc_check = (boolean IMPLEMENTATION_DEFINED "Use FPEXC32_EL2.EN value when {TGE,RW} == {1,0}"); if fpexc_check && FPEXC.EN == '0' then AArch32.Undefined(); AArch32.CheckFPAdvSIMDTrap(advsimd); // Also check against HCPTR and CPTR_EL3
// AArch32.CheckFPAdvSIMDTrap() // ============================ // Check against CPTR_EL2 and CPTR_EL3. AArch32.CheckFPAdvSIMDTrap(boolean advsimd) if EL2Enabled() && !ELUsingAArch32(EL2) then AArch64.CheckFPAdvSIMDTrap(); else if (HaveEL(EL3) && !ELUsingAArch32(EL3) && CPTR_EL3.TFP == '1' && EL3SDDUndefPriority()) then UNDEFINED; ss = CurrentSecurityState(); if HaveEL(EL2) && ss != SS_Secure then hcptr_tase = HCPTR.TASE; hcptr_cp10 = HCPTR.TCP10; if HaveEL(EL3) && ELUsingAArch32(EL3) then // Check if access disabled in NSACR if NSACR.NSASEDIS == '1' then hcptr_tase = '1'; if NSACR.cp10 == '0' then hcptr_cp10 = '1'; // Check if access disabled in HCPTR if (advsimd && hcptr_tase == '1') || hcptr_cp10 == '1' then except = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap); except.syndrome<24:20> = ConditionSyndrome(); if advsimd then except.syndrome<5> = '1'; else except.syndrome<5> = '0'; except.syndrome<3:0> = '1010'; // coproc field, always 0xA if PSTATE.EL == EL2 then AArch32.TakeUndefInstrException(except); else AArch32.TakeHypTrapException(except); if HaveEL(EL3) && !ELUsingAArch32(EL3) then // Check if access disabled in CPTR_EL3 if CPTR_EL3.TFP == '1' then if EL3SDDUndef() then UNDEFINED; else AArch64.AdvSIMDFPAccessTrap(EL3);
// AArch32.CheckForSMCUndefOrTrap() // ================================ // Check for UNDEFINED or trap on SMC instruction AArch32.CheckForSMCUndefOrTrap() if !HaveEL(EL3) || PSTATE.EL == EL0 then UNDEFINED; if EL2Enabled() && !ELUsingAArch32(EL2) then AArch64.CheckForSMCUndefOrTrap(Zeros(16)); else route_to_hyp = EL2Enabled() && PSTATE.EL == EL1 && HCR.TSC == '1'; if route_to_hyp then except = ExceptionSyndrome(Exception_MonitorCall); AArch32.TakeHypTrapException(except);
// AArch32.CheckForSVCTrap() // ========================= // Check for trap on SVC instruction AArch32.CheckForSVCTrap(bits(16) immediate) if IsFeatureImplemented(FEAT_FGT) then route_to_el2 = FALSE; if PSTATE.EL == EL0 then route_to_el2 = (!ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' && (!IsInHost() && (!HaveEL(EL3) || SCR_EL3.FGTEn == '1'))); if route_to_el2 then except = ExceptionSyndrome(Exception_SupervisorCall); except.syndrome<15:0> = immediate; except.trappedsyscallinst = TRUE; bits(64) preferred_exception_return = ThisInstrAddr(64); vect_offset = 0x0; AArch64.TakeException(EL2, except, preferred_exception_return, vect_offset);
// AArch32.CheckForWFxTrap() // ========================= // Check for trap on WFE or WFI instruction AArch32.CheckForWFxTrap(bits(2) target_el, WFxType wfxtype) assert HaveEL(target_el); // Check for routing to AArch64 if !ELUsingAArch32(target_el) then AArch64.CheckForWFxTrap(target_el, wfxtype); return; boolean is_wfe = wfxtype == WFxType_WFE; boolean trap; case target_el of when EL1 trap = (if is_wfe then SCTLR.nTWE else SCTLR.nTWI) == '0'; when EL2 trap = (if is_wfe then HCR.TWE else HCR.TWI) == '1'; when EL3 trap = (if is_wfe then SCR.TWE else SCR.TWI) == '1'; if trap then if target_el == EL1 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' then AArch64.WFxTrap(wfxtype, target_el); if target_el == EL3 && !EL3SDDUndef() then AArch32.TakeMonitorTrapException(); elsif target_el == EL2 then except = ExceptionSyndrome(Exception_WFxTrap); except.syndrome<24:20> = ConditionSyndrome(); case wfxtype of when WFxType_WFI except.syndrome<0> = '0'; when WFxType_WFE except.syndrome<0> = '1'; AArch32.TakeHypTrapException(except); else AArch32.TakeUndefInstrException();
// AArch32.CheckITEnabled() // ======================== // Check whether the T32 IT instruction is disabled. AArch32.CheckITEnabled(bits(4) mask) bit it_disabled; if PSTATE.EL == EL2 then it_disabled = HSCTLR.ITD; else it_disabled = (if ELUsingAArch32(EL1) then SCTLR.ITD else SCTLR_ELx[].ITD); if it_disabled == '1' then if mask != '1000' then UNDEFINED; accdesc = CreateAccDescIFetch(); aligned = TRUE; // Otherwise whether the IT block is allowed depends on hw1 of the next instruction. next_instr = AArch32.MemSingle[NextInstrAddr(32), 2, accdesc, aligned]; if next_instr IN {'11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx', '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111'} then // It is IMPLEMENTATION DEFINED whether the Undefined Instruction exception is // taken on the IT instruction or the next instruction. This is not reflected in // the pseudocode, which always takes the exception on the IT instruction. This // also does not take into account cases where the next instruction is UNPREDICTABLE. UNDEFINED; return;
// AArch32.CheckIllegalState() // =========================== // Check PSTATE.IL bit and generate Illegal Execution state exception if set. AArch32.CheckIllegalState() if AArch32.GeneralExceptionsToAArch64() then AArch64.CheckIllegalState(); elsif PSTATE.IL == '1' then route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1'; bits(32) preferred_exception_return = ThisInstrAddr(32); vect_offset = 0x04; if PSTATE.EL == EL2 || route_to_hyp then except = ExceptionSyndrome(Exception_IllegalState); if PSTATE.EL == EL2 then AArch32.EnterHypMode(except, preferred_exception_return, vect_offset); else AArch32.EnterHypMode(except, preferred_exception_return, 0x14); else AArch32.TakeUndefInstrException();
// AArch32.CheckSETENDEnabled() // ============================ // Check whether the AArch32 SETEND instruction is disabled. AArch32.CheckSETENDEnabled() bit setend_disabled; if PSTATE.EL == EL2 then setend_disabled = HSCTLR.SED; else setend_disabled = (if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR_ELx[].SED); if setend_disabled == '1' then UNDEFINED; return;
// AArch32.SystemAccessTrap() // ========================== // Trapped System register access. AArch32.SystemAccessTrap(bits(5) mode, integer ec) (valid, target_el) = ELFromM32(mode); assert valid && HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL); if target_el == EL2 then except = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec); AArch32.TakeHypTrapException(except); else AArch32.TakeUndefInstrException();
// AArch32.SystemAccessTrapSyndrome() // ================================== // Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, VMRS, and // VMSR instructions, other than traps that are due to HCPTR or CPACR. ExceptionRecord AArch32.SystemAccessTrapSyndrome(bits(32) instr, integer ec) ExceptionRecord except; case ec of when 0x0 except = ExceptionSyndrome(Exception_Uncategorized); when 0x3 except = ExceptionSyndrome(Exception_CP15RTTrap); when 0x4 except = ExceptionSyndrome(Exception_CP15RRTTrap); when 0x5 except = ExceptionSyndrome(Exception_CP14RTTrap); when 0x6 except = ExceptionSyndrome(Exception_CP14DTTrap); when 0x7 except = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap); when 0x8 except = ExceptionSyndrome(Exception_FPIDTrap); when 0xC except = ExceptionSyndrome(Exception_CP14RRTTrap); otherwise Unreachable(); bits(20) iss = Zeros(20); if except.exceptype == Exception_Uncategorized then return except; elsif except.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then // Trapped MRC/MCR, VMRS on FPSID iss<13:10> = instr<19:16>; // CRn, Reg in case of VMRS iss<8:5> = instr<15:12>; // Rt iss<9> = '0'; // RES0 if except.exceptype != Exception_FPIDTrap then // When trap is not for VMRS iss<19:17> = instr<7:5>; // opc2 iss<16:14> = instr<23:21>; // opc1 iss<4:1> = instr<3:0>; // CRm else // VMRS access iss<19:17> = '000'; // opc2 - hardcoded for VMRS iss<16:14> = '111'; // opc1 - hardcoded for VMRS iss<4:1> = '0000'; // CRm - hardcoded for VMRS elsif except.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap, Exception_CP15RRTTrap} then // Trapped MRRC/MCRR, VMRS/VMSR iss<19:16> = instr<7:4>; // opc1 iss<13:10> = instr<19:16>; // Rt2 iss<8:5> = instr<15:12>; // Rt iss<4:1> = instr<3:0>; // CRm elsif except.exceptype == Exception_CP14DTTrap then // Trapped LDC/STC iss<19:12> = instr<7:0>; // imm8 iss<4> = instr<23>; // U iss<2:1> = instr<24,21>; // P,W if instr<19:16> == '1111' then // Rn==15, LDC(Literal addressing)/STC iss<8:5> = bits(4) UNKNOWN; iss<3> = '1'; iss<0> = instr<20>; // Direction except.syndrome<24:20> = ConditionSyndrome(); except.syndrome<19:0> = iss; return except;
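Note: as an informal illustration only (not part of the specification), the ISS packing for a trapped MRC/MCR access described above corresponds to the C sketch below; mrc_trap_iss() and its layout comments are this note's own naming.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: pack the 20-bit ISS for a
 * trapped MRC/MCR access from the instruction encoding, following the
 * field placement in AArch32.SystemAccessTrapSyndrome(). */
static uint32_t mrc_trap_iss(uint32_t instr)
{
    uint32_t iss = 0;
    iss |= ((instr >> 5)  & 0x7u) << 17;  /* opc2 <- instr<7:5>   */
    iss |= ((instr >> 21) & 0x7u) << 14;  /* opc1 <- instr<23:21> */
    iss |= ((instr >> 16) & 0xFu) << 10;  /* CRn  <- instr<19:16> */
    iss |= ((instr >> 12) & 0xFu) << 5;   /* Rt   <- instr<15:12> */
    iss |= (instr & 0xFu) << 1;           /* CRm  <- instr<3:0>   */
    iss |= (instr >> 20) & 0x1u;          /* Direction bit        */
    return iss;
}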
// AArch32.TakeHypTrapException() // ============================== // Exceptions routed to Hyp mode as a Hyp Trap exception. AArch32.TakeHypTrapException(integer ec) except = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec); AArch32.TakeHypTrapException(except); // AArch32.TakeHypTrapException() // ============================== // Exceptions routed to Hyp mode as a Hyp Trap exception. AArch32.TakeHypTrapException(ExceptionRecord except) assert HaveEL(EL2) && CurrentSecurityState() == SS_NonSecure && ELUsingAArch32(EL2); bits(32) preferred_exception_return = ThisInstrAddr(32); vect_offset = 0x14; AArch32.EnterHypMode(except, preferred_exception_return, vect_offset);
// AArch32.TakeMonitorTrapException() // ================================== // Exceptions routed to Monitor mode as a Monitor Trap exception. AArch32.TakeMonitorTrapException() assert HaveEL(EL3) && ELUsingAArch32(EL3); bits(32) preferred_exception_return = ThisInstrAddr(32); vect_offset = 0x04; lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2; AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeUndefInstrException() // ================================= AArch32.TakeUndefInstrException() except = ExceptionSyndrome(Exception_Uncategorized); AArch32.TakeUndefInstrException(except); // AArch32.TakeUndefInstrException() // ================================= AArch32.TakeUndefInstrException(ExceptionRecord except) route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1'; bits(32) preferred_exception_return = ThisInstrAddr(32); vect_offset = 0x04; lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2; if PSTATE.EL == EL2 then AArch32.EnterHypMode(except, preferred_exception_return, vect_offset); elsif route_to_hyp then AArch32.EnterHypMode(except, preferred_exception_return, 0x14); else AArch32.EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset);
// AArch32.Undefined() // =================== AArch32.Undefined() if AArch32.GeneralExceptionsToAArch64() then AArch64.Undefined(); AArch32.TakeUndefInstrException();
// AArch32.DomainValid() // ===================== // Returns TRUE if the Domain is valid for a Short-descriptor translation scheme. boolean AArch32.DomainValid(Fault statuscode, integer level) assert statuscode != Fault_None; case statuscode of when Fault_Domain return TRUE; when Fault_Translation, Fault_AccessFlag, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk return level == 2; otherwise return FALSE;
// AArch32.FaultSyndrome() // ======================= // Creates an exception syndrome value for Abort and Watchpoint exceptions taken // to AArch32 Hyp mode. bits(25) AArch32.FaultSyndrome(Exception exceptype, FaultRecord fault) assert fault.statuscode != Fault_None; bits(25) iss = Zeros(25); boolean d_side = exceptype == Exception_DataAbort; if IsFeatureImplemented(FEAT_RAS) && IsAsyncAbort(fault) then ErrorState errstate = PEErrorState(fault); iss<11:10> = AArch32.EncodeAsyncErrorSyndrome(errstate); // AET if d_side then if (IsSecondStage(fault) && !fault.s2fs1walk && (!IsExternalSyncAbort(fault) || (!IsFeatureImplemented(FEAT_RAS) && fault.accessdesc.acctype == AccessType_TTW && boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk"))) then iss<24:14> = LSInstructionSyndrome(); if fault.accessdesc.acctype IN {AccessType_DC, AccessType_IC, AccessType_AT} then iss<8> = '1'; if fault.accessdesc.acctype IN {AccessType_DC, AccessType_IC, AccessType_AT} then iss<6> = '1'; elsif fault.statuscode IN {Fault_HWUpdateAccessFlag, Fault_Exclusive} then iss<6> = bit UNKNOWN; elsif fault.accessdesc.atomicop && IsExternalAbort(fault) then iss<6> = bit UNKNOWN; else iss<6> = if fault.write then '1' else '0'; if IsExternalAbort(fault) then iss<9> = fault.extflag; iss<7> = if fault.s2fs1walk then '1' else '0'; iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level); return iss;
// EncodeSDFSC() // ============= // Function that gives the Short-descriptor FSR code for different types of Fault bits(5) EncodeSDFSC(Fault statuscode, integer level) bits(5) result; case statuscode of when Fault_AccessFlag assert level IN {1,2}; result = if level == 1 then '00011' else '00110'; when Fault_Alignment result = '00001'; when Fault_Permission assert level IN {1,2}; result = if level == 1 then '01101' else '01111'; when Fault_Domain assert level IN {1,2}; result = if level == 1 then '01001' else '01011'; when Fault_Translation assert level IN {1,2}; result = if level == 1 then '00101' else '00111'; when Fault_SyncExternal result = '01000'; when Fault_SyncExternalOnWalk assert level IN {1,2}; result = if level == 1 then '01100' else '01110'; when Fault_SyncParity result = '11001'; when Fault_SyncParityOnWalk assert level IN {1,2}; result = if level == 1 then '11100' else '11110'; when Fault_AsyncParity result = '11000'; when Fault_AsyncExternal result = '10110'; when Fault_Debug result = '00010'; when Fault_TLBConflict result = '10000'; when Fault_Lockdown result = '10100'; // IMPLEMENTATION DEFINED when Fault_Exclusive result = '10101'; // IMPLEMENTATION DEFINED when Fault_ICacheMaint result = '00100'; otherwise Unreachable(); return result;
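Note: as an informal illustration (not part of the specification), the level-dependent MMU fault encodings of EncodeSDFSC() reduce to the C lookup below; the enumerators are this sketch's own names, not specification identifiers.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: short-descriptor FSR codes for
 * the level-dependent faults of EncodeSDFSC(). */
enum sd_fault { SD_ACCESS_FLAG, SD_TRANSLATION, SD_DOMAIN, SD_PERMISSION };

static uint8_t encode_sdfsc(enum sd_fault f, int level)
{
    switch (f) {
    case SD_ACCESS_FLAG: return level == 1 ? 0x03 : 0x06; /* '00011'/'00110' */
    case SD_TRANSLATION: return level == 1 ? 0x05 : 0x07; /* '00101'/'00111' */
    case SD_DOMAIN:      return level == 1 ? 0x09 : 0x0B; /* '01001'/'01011' */
    case SD_PERMISSION:  return level == 1 ? 0x0D : 0x0F; /* '01101'/'01111' */
    }
    return 0;
}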
// A32ExpandImm() // ============== bits(32) A32ExpandImm(bits(12) imm12) // PSTATE.C argument to following function call does not affect the imm32 result. (imm32, -) = A32ExpandImm_C(imm12, PSTATE.C); return imm32;
// A32ExpandImm_C() // ================ (bits(32), bit) A32ExpandImm_C(bits(12) imm12, bit carry_in) unrotated_value = ZeroExtend(imm12<7:0>, 32); (imm32, carry_out) = Shift_C(unrotated_value, SRType_ROR, 2*UInt(imm12<11:8>), carry_in); return (imm32, carry_out);
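Note: an informal C equivalent of A32ExpandImm_C() (the names are this sketch's own): the low byte is rotated right by twice the top four bits, and the carry-out is bit 31 of the result for a nonzero rotation.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: A32 modified-immediate
 * expansion with carry, mirroring A32ExpandImm_C(). */
static uint32_t a32_expand_imm_c(uint32_t imm12, int carry_in, int *carry_out)
{
    uint32_t value = imm12 & 0xFFu;
    unsigned rot = 2u * ((imm12 >> 8) & 0xFu);
    uint32_t result = rot ? (value >> rot) | (value << (32 - rot)) : value;
    *carry_out = rot ? (int)(result >> 31) : carry_in;
    return result;
}

For example, imm12 = 0x4FF encodes 0xFF rotated right by 8, giving 0xFF000000 with carry-out 1.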
// DecodeImmShift() // ================ (SRType, integer) DecodeImmShift(bits(2) srtype, bits(5) imm5) SRType shift_t; integer shift_n; case srtype of when '00' shift_t = SRType_LSL; shift_n = UInt(imm5); when '01' shift_t = SRType_LSR; shift_n = if imm5 == '00000' then 32 else UInt(imm5); when '10' shift_t = SRType_ASR; shift_n = if imm5 == '00000' then 32 else UInt(imm5); when '11' if imm5 == '00000' then shift_t = SRType_RRX; shift_n = 1; else shift_t = SRType_ROR; shift_n = UInt(imm5); return (shift_t, shift_n);
// DecodeRegShift() // ================ SRType DecodeRegShift(bits(2) srtype) SRType shift_t; case srtype of when '00' shift_t = SRType_LSL; when '01' shift_t = SRType_LSR; when '10' shift_t = SRType_ASR; when '11' shift_t = SRType_ROR; return shift_t;
// RRX() // ===== bits(N) RRX(bits(N) x, bit carry_in) (result, -) = RRX_C(x, carry_in); return result;
// RRX_C() // ======= (bits(N), bit) RRX_C(bits(N) x, bit carry_in) result = carry_in : x<N-1:1>; carry_out = x<0>; return (result, carry_out);
// SRType // ====== enumeration SRType {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX};
// Shift() // ======= bits(N) Shift(bits(N) value, SRType srtype, integer amount, bit carry_in) (result, -) = Shift_C(value, srtype, amount, carry_in); return result;
// Shift_C() // ========= (bits(N), bit) Shift_C(bits(N) value, SRType srtype, integer amount, bit carry_in) assert !(srtype == SRType_RRX && amount != 1); bits(N) result; bit carry_out; if amount == 0 then (result, carry_out) = (value, carry_in); else case srtype of when SRType_LSL (result, carry_out) = LSL_C(value, amount); when SRType_LSR (result, carry_out) = LSR_C(value, amount); when SRType_ASR (result, carry_out) = ASR_C(value, amount); when SRType_ROR (result, carry_out) = ROR_C(value, amount); when SRType_RRX (result, carry_out) = RRX_C(value, carry_in); return (result, carry_out);
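Note: as an informal illustration (not part of the specification), Shift_C() for 32-bit values can be modelled in C as below; shift_c32() and its enum are this sketch's own names. Register-controlled shifts can supply amounts of 32 or more, which must still deliver a meaningful carry, and this is also why DecodeImmShift() maps an imm5 of 0 to LSR/ASR #32 and to RRX rather than ROR #0.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: Shift_C() for N=32.
 * 'n' must be 1 for RRX; for the other types any non-negative amount
 * is accepted, as with register-controlled shifts. */
enum srtype { SR_LSL, SR_LSR, SR_ASR, SR_ROR, SR_RRX };

static uint32_t shift_c32(uint32_t v, enum srtype t, unsigned n,
                          int carry_in, int *carry_out)
{
    if (n == 0) { *carry_out = carry_in; return v; }
    switch (t) {
    case SR_LSL:
        *carry_out = n <= 32 ? (int)((v >> (32 - n)) & 1u) : 0;
        return n < 32 ? v << n : 0;
    case SR_LSR:
        *carry_out = n <= 32 ? (int)((v >> (n - 1)) & 1u) : 0;
        return n < 32 ? v >> n : 0;
    case SR_ASR: {
        unsigned m = n > 32 ? 32 : n;     /* further bits repeat the sign */
        int64_t sv = (int32_t)v;
        *carry_out = (int)((sv >> (m - 1)) & 1);
        return (uint32_t)(sv >> m);
    }
    case SR_ROR: {
        unsigned m = n & 31u;             /* multiples of 32 leave v unchanged */
        uint32_t r = m ? (v >> m) | (v << (32 - m)) : v;
        *carry_out = (int)(r >> 31);
        return r;
    }
    case SR_RRX:
        *carry_out = (int)(v & 1u);
        return ((uint32_t)(carry_in & 1) << 31) | (v >> 1);
    }
    return v;
}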
// T32ExpandImm() // ============== bits(32) T32ExpandImm(bits(12) imm12) // PSTATE.C argument to following function call does not affect the imm32 result. (imm32, -) = T32ExpandImm_C(imm12, PSTATE.C); return imm32;
// T32ExpandImm_C() // ================ (bits(32), bit) T32ExpandImm_C(bits(12) imm12, bit carry_in) bits(32) imm32; bit carry_out; if imm12<11:10> == '00' then case imm12<9:8> of when '00' imm32 = ZeroExtend(imm12<7:0>, 32); when '01' imm32 = '00000000' : imm12<7:0> : '00000000' : imm12<7:0>; when '10' imm32 = imm12<7:0> : '00000000' : imm12<7:0> : '00000000'; when '11' imm32 = imm12<7:0> : imm12<7:0> : imm12<7:0> : imm12<7:0>; carry_out = carry_in; else unrotated_value = ZeroExtend('1':imm12<6:0>, 32); (imm32, carry_out) = ROR_C(unrotated_value, UInt(imm12<11:7>)); return (imm32, carry_out);
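Note: an informal C equivalent of T32ExpandImm_C() (names are this sketch's own). The four replication patterns apply when imm12<11:10> is '00'; otherwise the value '1':imm12<6:0> is rotated right by imm12<11:7>, which is always at least 8 here, so the carry-out is bit 31 of the result.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: T32 modified-immediate
 * expansion with carry, mirroring T32ExpandImm_C(). */
static uint32_t t32_expand_imm_c(uint32_t imm12, int carry_in, int *carry_out)
{
    uint32_t b = imm12 & 0xFFu;
    if (((imm12 >> 10) & 0x3u) == 0) {
        *carry_out = carry_in;
        switch ((imm12 >> 8) & 0x3u) {
        case 0:  return b;                                    /* 000000XY */
        case 1:  return (b << 16) | b;                        /* 00XY00XY */
        case 2:  return (b << 24) | (b << 8);                 /* XY00XY00 */
        default: return (b << 24) | (b << 16) | (b << 8) | b; /* XYXYXYXY */
        }
    }
    uint32_t value = 0x80u | (imm12 & 0x7Fu);  /* '1' : imm12<6:0> */
    unsigned rot = (imm12 >> 7) & 0x1Fu;       /* 8..31 in this branch */
    uint32_t result = (value >> rot) | (value << (32 - rot));
    *carry_out = (int)(result >> 31);
    return result;
}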
// VBitOps // ======= enumeration VBitOps {VBitOps_VBIF, VBitOps_VBIT, VBitOps_VBSL};
// VCGEType // ======== enumeration VCGEType {VCGEType_signed, VCGEType_unsigned, VCGEType_fp};
// VCGTtype // ======== enumeration VCGTtype {VCGTtype_signed, VCGTtype_unsigned, VCGTtype_fp};
// VFPNegMul // ========= enumeration VFPNegMul {VFPNegMul_VNMLA, VFPNegMul_VNMLS, VFPNegMul_VNMUL};
// AArch32.CheckCP15InstrCoarseTraps() // =================================== // Check for coarse-grained traps to System registers in the // coproc=0b1111 encoding space by HSTR and HCR. AArch32.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm) if PSTATE.EL == EL0 && (!ELUsingAArch32(EL1) || (EL2Enabled() && !ELUsingAArch32(EL2))) then AArch64.CheckCP15InstrCoarseTraps(CRn, nreg, CRm); trapped_encoding = ((CRn == 9 && CRm IN {0,1,2, 5,6,7,8 }) || (CRn == 10 && CRm IN {0,1, 4, 8 }) || (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15})); // Check for coarse-grained Hyp traps if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then major = if nreg == 1 then CRn else CRm; // Check for MCR, MRC, MCRR, and MRRC disabled by HSTR<CRn/CRm> // and MRC and MCR disabled by HCR.TIDCP. if ((!(major IN {4,14}) && HSTR<major> == '1') || (HCR.TIDCP == '1' && nreg == 1 && trapped_encoding)) then if (PSTATE.EL == EL0 && boolean IMPLEMENTATION_DEFINED "UNDEF unallocated CP15 access at EL0") then UNDEFINED; if ELUsingAArch32(EL2) then AArch32.SystemAccessTrap(M32_Hyp, 0x3); else AArch64.AArch32SystemAccessTrap(EL2, 0x3);
// AArch32.ExclusiveMonitorsPass() // =============================== // Return TRUE if the Exclusives monitors for the current PE include all of the addresses // associated with the virtual address region of size bytes starting at address. // The immediately following memory write must be to the same addresses. // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens // before or after the check on the local Exclusives monitor. As a result, a failure // of the local monitor can occur on some implementations even if the memory // access would give a memory abort. boolean AArch32.ExclusiveMonitorsPass(bits(32) address, integer size) boolean acqrel = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescExLDST(MemOp_STORE, acqrel, tagchecked); boolean aligned = IsAligned(address, size); if !aligned then AArch32.Abort(address, AlignmentFault(accdesc)); if !AArch32.IsExclusiveVA(address, ProcessorID(), size) then return FALSE; memaddrdesc = AArch32.TranslateAddress(address, accdesc, aligned, size); // Check for aborts or debug exceptions if IsFault(memaddrdesc) then AArch32.Abort(address, memaddrdesc.fault); passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size); ClearExclusiveLocal(ProcessorID()); if passed && memaddrdesc.memattrs.shareability != Shareability_NSH then passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size); return passed;
// AArch32.IsExclusiveVA() // ======================= // An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual // address region of size bytes starting at address. // // It is permitted (but not required) for this function to return FALSE and // cause a store exclusive to fail if the virtual address region is not // totally included within the region recorded by MarkExclusiveVA(). // // It is always safe to return TRUE which will check the physical address only. boolean AArch32.IsExclusiveVA(bits(32) address, integer processorid, integer size);
// AArch32.MarkExclusiveVA() // ========================= // Optionally record an exclusive access to the virtual address region of size bytes // starting at address for processorid. AArch32.MarkExclusiveVA(bits(32) address, integer processorid, integer size);
// AArch32.SetExclusiveMonitors() // ============================== // Sets the Exclusives monitors for the current PE to record the addresses associated // with the virtual address region of size bytes starting at address. AArch32.SetExclusiveMonitors(bits(32) address, integer size) boolean acqrel = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescExLDST(MemOp_LOAD, acqrel, tagchecked); boolean aligned = IsAligned(address, size); if !aligned then AArch32.Abort(address, AlignmentFault(accdesc)); memaddrdesc = AArch32.TranslateAddress(address, accdesc, aligned, size); // Check for aborts or debug exceptions if IsFault(memaddrdesc) then return; if memaddrdesc.memattrs.shareability != Shareability_NSH then MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size); MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size); AArch32.MarkExclusiveVA(address, ProcessorID(), size);
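Note: the pairing of SetExclusiveMonitors() on a load-exclusive with ExclusiveMonitorsPass() on the following store-exclusive can be pictured with the informal local-monitor model below; the structure and function names are this sketch's own, and it ignores the global monitor, translation, and alignment checks shown in the pseudocode above.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model, not Arm pseudocode: a per-PE local monitor armed by
 * the load-exclusive and tested-and-cleared by the store-exclusive. */
struct local_monitor { bool exclusive; uint64_t paddr; unsigned size; };

static void set_exclusive_monitors(struct local_monitor *m,
                                   uint64_t pa, unsigned size)
{
    m->exclusive = true;  m->paddr = pa;  m->size = size;
}

static bool exclusive_monitors_pass(struct local_monitor *m,
                                    uint64_t pa, unsigned size)
{
    bool pass = m->exclusive && m->paddr == pa && m->size == size;
    m->exclusive = false;   /* the check always clears the local monitor */
    return pass;
}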
// CheckAdvSIMDEnabled() // ===================== CheckAdvSIMDEnabled() fpexc_check = TRUE; advsimd = TRUE; AArch32.CheckAdvSIMDOrFPEnabled(fpexc_check, advsimd); // Return from CheckAdvSIMDOrFPEnabled() occurs only if Advanced SIMD access is permitted // Make temporary copy of D registers // _Dclone[] is used as input data for instruction pseudocode for i = 0 to 31 _Dclone[i] = D[i]; return;
// CheckAdvSIMDOrVFPEnabled() // ========================== CheckAdvSIMDOrVFPEnabled(boolean include_fpexc_check, boolean advsimd) AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd); // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted return;
// CheckCryptoEnabled32() // ====================== CheckCryptoEnabled32() CheckAdvSIMDEnabled(); // Return from CheckAdvSIMDEnabled() occurs only if access is permitted return;
// CheckVFPEnabled() // ================= CheckVFPEnabled(boolean include_fpexc_check) advsimd = FALSE; AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd); // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted return;
// FPHalvedSub() // ============= bits(N) FPHalvedSub(bits(N) op1, bits(N) op2, FPCR_Type fpcr) assert N IN {16,32,64}; rounding = FPRoundingMode(fpcr); (type1,sign1,value1) = FPUnpack(op1, fpcr); (type2,sign2,value2) = FPUnpack(op2, fpcr); (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr); if !done then inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity); zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero); if inf1 && inf2 && sign1 == sign2 then result = FPDefaultNaN(fpcr, N); FPProcessException(FPExc_InvalidOp, fpcr); elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then result = FPInfinity('0', N); elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then result = FPInfinity('1', N); elsif zero1 && zero2 && sign1 != sign2 then result = FPZero(sign1, N); else result_value = (value1 - value2) / 2.0; if result_value == 0.0 then // Sign of exact zero result depends on rounding mode result_sign = if rounding == FPRounding_NEGINF then '1' else '0'; result = FPZero(result_sign, N); else result = FPRound(result_value, fpcr, N); return result;
// FPRSqrtStep() // ============= bits(N) FPRSqrtStep(bits(N) op1, bits(N) op2) assert N IN {16,32}; constant FPCR_Type fpcr = StandardFPCR(); (type1,sign1,value1) = FPUnpack(op1, fpcr); (type2,sign2,value2) = FPUnpack(op2, fpcr); (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr); if !done then inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity); zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero); bits(N) product; if (inf1 && zero2) || (zero1 && inf2) then product = FPZero('0', N); else product = FPMul(op1, op2, fpcr); bits(N) three = FPThree('0', N); result = FPHalvedSub(three, product, fpcr); return result;
// FPRecipStep() // ============= bits(N) FPRecipStep(bits(N) op1, bits(N) op2) assert N IN {16,32}; constant FPCR_Type fpcr = StandardFPCR(); (type1,sign1,value1) = FPUnpack(op1, fpcr); (type2,sign2,value2) = FPUnpack(op2, fpcr); (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr); if !done then inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity); zero1 = (type1 == FPType_Zero); zero2 = (type2 == FPType_Zero); bits(N) product; if (inf1 && zero2) || (zero1 && inf2) then product = FPZero('0', N); else product = FPMul(op1, op2, fpcr); bits(N) two = FPTwo('0', N); result = FPSub(two, product, fpcr); return result;
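Note: FPRecipStep() and FPRSqrtStep() are the refinement steps of a Newton-Raphson iteration (used by the VRECPS and VRSQRTS instructions). For an operand d and a current estimate x, FPRecipStep() returns 2 - d*x, so that x' = x * (2 - d*x) converges to 1/d; FPRSqrtStep() returns (3 - d*x*x) / 2, with FPHalvedSub() performing the subtract-and-halve as a single rounded operation, so that x' = x * (3 - d*x*x) / 2 converges to 1/sqrt(d). The infinity-times-zero special cases above keep the product well defined at the fixed points of the iteration.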
// StandardFPCR() // ============== FPCR_Type StandardFPCR() bits(32) value = '00000' : FPSCR.AHP : '110000' : FPSCR.FZ16 : '0000000000000000000'; return ZeroExtend(value, 64);
// AArch32.MemSingle[] - non-assignment (read) form // ================================================ // Perform an atomic, little-endian read of 'size' bytes. bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccessDescriptor accdesc, boolean aligned] bits(size*8) value; AddressDescriptor memaddrdesc; PhysMemRetStatus memstatus; (value, memaddrdesc, memstatus) = AArch32.MemSingleRead(address, size, accdesc, aligned); // Check for a fault from translation or the output of translation. if IsFault(memaddrdesc) then AArch32.Abort(address, memaddrdesc.fault); // Check for external aborts. if IsFault(memstatus) then HandleExternalAbort(memstatus, accdesc.write, memaddrdesc, size, accdesc); return value; // AArch32.MemSingle[] - assignment (write) form // ============================================= // Perform an atomic, little-endian write of 'size' bytes. AArch32.MemSingle[bits(32) address, integer size, AccessDescriptor accdesc, boolean aligned] = bits(size*8) value AddressDescriptor memaddrdesc; PhysMemRetStatus memstatus; (memaddrdesc, memstatus) = AArch32.MemSingleWrite(address, size, accdesc, aligned, value); // Check for a fault from translation or the output of translation. if IsFault(memaddrdesc) then AArch32.Abort(address, memaddrdesc.fault); // Check for external aborts. if IsFault(memstatus) then HandleExternalWriteAbort(memstatus, memaddrdesc, size, accdesc); return;
// AArch32.MemSingleRead() // ======================= // Perform an atomic, little-endian read of 'size' bytes. (bits(size*8), AddressDescriptor, PhysMemRetStatus) AArch32.MemSingleRead(bits(32) address, integer size, AccessDescriptor accdesc_in, boolean aligned) assert size IN {1, 2, 4, 8, 16}; bits(size*8) value = bits(size*8) UNKNOWN; PhysMemRetStatus memstatus = PhysMemRetStatus UNKNOWN; AccessDescriptor accdesc = accdesc_in; assert IsAligned(address, size); AddressDescriptor memaddrdesc; memaddrdesc = AArch32.TranslateAddress(address, accdesc, aligned, size); // Check for aborts or debug exceptions if IsFault(memaddrdesc) then return (value, memaddrdesc, memstatus); // Memory array access (memstatus, value) = PhysMemRead(memaddrdesc, size, accdesc); if IsFault(memstatus) then return (value, memaddrdesc, memstatus); return (value, memaddrdesc, memstatus);
// AArch32.MemSingleWrite() // ======================== // Perform an atomic, little-endian write of 'size' bytes. (AddressDescriptor, PhysMemRetStatus) AArch32.MemSingleWrite(bits(32) address, integer size, AccessDescriptor accdesc_in, boolean aligned, bits(size*8) value) assert size IN {1, 2, 4, 8, 16}; AccessDescriptor accdesc = accdesc_in; assert IsAligned(address, size); AddressDescriptor memaddrdesc; PhysMemRetStatus memstatus = PhysMemRetStatus UNKNOWN; memaddrdesc = AArch32.TranslateAddress(address, accdesc, aligned, size); // Check for aborts or debug exceptions if IsFault(memaddrdesc) then return (memaddrdesc, memstatus); // Effect on exclusives if memaddrdesc.memattrs.shareability != Shareability_NSH then ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size); memstatus = PhysMemWrite(memaddrdesc, size, accdesc, value); if IsFault(memstatus) then return (memaddrdesc, memstatus); return (memaddrdesc, memstatus);
// AArch32.UnalignedAccessFaults() // =============================== // Determine whether the unaligned access generates an Alignment fault boolean AArch32.UnalignedAccessFaults(AccessDescriptor accdesc) return (AlignmentEnforced() || accdesc.a32lsmd || accdesc.exclusive || accdesc.acqsc || accdesc.relsc);
// Hint_PreloadData() // ================== Hint_PreloadData(bits(32) address);
// Hint_PreloadDataForWrite() // ========================== Hint_PreloadDataForWrite(bits(32) address);
// Hint_PreloadInstr() // =================== Hint_PreloadInstr(bits(32) address);
// MemA[] - non-assignment form // ============================ bits(8*size) MemA[bits(32) address, integer size] boolean acqrel = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescExLDST(MemOp_LOAD, acqrel, tagchecked); return Mem_with_type[address, size, accdesc]; // MemA[] - assignment form // ======================== MemA[bits(32) address, integer size] = bits(8*size) value boolean acqrel = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescExLDST(MemOp_STORE, acqrel, tagchecked); Mem_with_type[address, size, accdesc] = value; return;
// MemO[] - non-assignment form // ============================ bits(8*size) MemO[bits(32) address, integer size] boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescAcqRel(MemOp_LOAD, tagchecked); return Mem_with_type[address, size, accdesc]; // MemO[] - assignment form // ======================== MemO[bits(32) address, integer size] = bits(8*size) value boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescAcqRel(MemOp_STORE, tagchecked); Mem_with_type[address, size, accdesc] = value; return;
// MemS[] - non-assignment form // ============================ // Memory accessor for streaming load multiple instructions bits(8*size) MemS[bits(32) address, integer size] AccessDescriptor accdesc = CreateAccDescA32LSMD(MemOp_LOAD); return Mem_with_type[address, size, accdesc]; // MemS[] - assignment form // ======================== // Memory accessor for streaming store multiple instructions MemS[bits(32) address, integer size] = bits(8*size) value AccessDescriptor accdesc = CreateAccDescA32LSMD(MemOp_STORE); Mem_with_type[address, size, accdesc] = value; return;
// MemU[] - non-assignment form // ============================ bits(8*size) MemU[bits(32) address, integer size] boolean nontemporal = FALSE; boolean privileged = PSTATE.EL != EL0; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescGPR(MemOp_LOAD, nontemporal, privileged, tagchecked); return Mem_with_type[address, size, accdesc]; // MemU[] - assignment form // ======================== MemU[bits(32) address, integer size] = bits(8*size) value boolean nontemporal = FALSE; boolean privileged = PSTATE.EL != EL0; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescGPR(MemOp_STORE, nontemporal, privileged, tagchecked); Mem_with_type[address, size, accdesc] = value; return;
// MemU_unpriv[] - non-assignment form // =================================== bits(8*size) MemU_unpriv[bits(32) address, integer size] boolean nontemporal = FALSE; boolean privileged = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescGPR(MemOp_LOAD, nontemporal, privileged, tagchecked); return Mem_with_type[address, size, accdesc]; // MemU_unpriv[] - assignment form // =============================== MemU_unpriv[bits(32) address, integer size] = bits(8*size) value boolean nontemporal = FALSE; boolean privileged = FALSE; boolean tagchecked = FALSE; AccessDescriptor accdesc = CreateAccDescGPR(MemOp_STORE, nontemporal, privileged, tagchecked); Mem_with_type[address, size, accdesc] = value; return;
// Mem_with_type[] - non-assignment (read) form // ============================================ // Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access. // Instruction fetches would call AArch32.MemSingle directly. bits(size*8) Mem_with_type[bits(32) address, integer size, AccessDescriptor accdesc_in] assert size IN {1, 2, 4, 8, 16}; AccessDescriptor accdesc = accdesc_in; bits(size * 8) value; // Check alignment on size of element accessed, not overall access size integer alignment = if accdesc.ispair then size DIV 2 else size; boolean aligned = IsAligned(address, alignment); if !aligned && AArch32.UnalignedAccessFaults(accdesc) then AArch32.Abort(address, AlignmentFault(accdesc)); if aligned then value = AArch32.MemSingle[address, size, accdesc, aligned]; else assert size > 1; value<7:0> = AArch32.MemSingle[address, 1, accdesc, aligned]; // For subsequent bytes, if they cross to a new translation page which assigns // Device memory type, it is CONSTRAINED UNPREDICTABLE whether an unaligned access // will generate an Alignment Fault. c = ConstrainUnpredictable(Unpredictable_DEVPAGE2); assert c IN {Constraint_FAULT, Constraint_NONE}; if c == Constraint_NONE then aligned = TRUE; for i = 1 to size-1 value<8*i+7:8*i> = AArch32.MemSingle[address+i, 1, accdesc, aligned]; if BigEndian(accdesc.acctype) then value = BigEndianReverse(value); return value; // Mem_with_type[] - assignment (write) form // ========================================= // Perform a write of 'size' bytes. The byte order is reversed for a big-endian access. Mem_with_type[bits(32) address, integer size, AccessDescriptor accdesc_in] = bits(size*8) value_in bits(size*8) value = value_in; AccessDescriptor accdesc = accdesc_in; // Check alignment on size of element accessed, not overall access size integer alignment = if accdesc.ispair then size DIV 2 else size; boolean aligned = IsAligned(address, alignment); if !aligned && AArch32.UnalignedAccessFaults(accdesc) then AArch32.Abort(address, AlignmentFault(accdesc)); if BigEndian(accdesc.acctype) then value = BigEndianReverse(value); if aligned then AArch32.MemSingle[address, size, accdesc, aligned] = value; else assert size > 1; AArch32.MemSingle[address, 1, accdesc, aligned] = value<7:0>; // For subsequent bytes, if they cross to a new translation page which assigns // Device memory type, it is CONSTRAINED UNPREDICTABLE whether an unaligned access // will generate an Alignment Fault. c = ConstrainUnpredictable(Unpredictable_DEVPAGE2); assert c IN {Constraint_FAULT, Constraint_NONE}; if c == Constraint_NONE then aligned = TRUE; for i = 1 to size-1 AArch32.MemSingle[address+i, 1, accdesc, aligned] = value<8*i+7:8*i>; return;
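Note: as an informal illustration (not part of the specification), the read path of Mem_with_type[] reduces to the sketch below for sizes up to 8 bytes; read_byte() stands in for a byte-sized AArch32.MemSingle[] access and is an assumption of this sketch, and the aligned single-access fast path and fault checks are omitted.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: gather 'size' bytes in
 * ascending address order, placing them per the access endianness; this
 * matches a little-endian gather followed by BigEndianReverse(). */
extern uint8_t read_byte(uint32_t address);   /* assumed accessor */

static uint64_t mem_read(uint32_t address, unsigned size, int big_endian)
{
    uint64_t value = 0;
    for (unsigned i = 0; i < size; i++) {
        unsigned byte = big_endian ? size - 1 - i : i;
        value |= (uint64_t)read_byte(address + i) << (8u * byte);
    }
    return value;
}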
// AArch32.ESBOperation() // ====================== // Perform the AArch32 ESB operation for ESB executed in AArch32 state AArch32.ESBOperation() // Check if routed to AArch64 state route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1); if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then route_to_aarch64 = HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1'; if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then route_to_aarch64 = EffectiveEA() == '1'; if route_to_aarch64 then AArch64.ESBOperation(); return; route_to_monitor = HaveEL(EL3) && ELUsingAArch32(EL3) && EffectiveEA() == '1'; route_to_hyp = PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR.TGE == '1' || HCR.AMO == '1'); bits(5) target; if route_to_monitor then target = M32_Monitor; elsif route_to_hyp || PSTATE.M == M32_Hyp then target = M32_Hyp; else target = M32_Abort; boolean mask_active; if CurrentSecurityState() == SS_Secure then mask_active = TRUE; elsif target == M32_Monitor then mask_active = SCR.AW == '1' && (!HaveEL(EL2) || (HCR.TGE == '0' && HCR.AMO == '0')); else mask_active = target == M32_Abort || PSTATE.M == M32_Hyp; mask_set = PSTATE.A == '1'; (-, el) = ELFromM32(target); intdis = Halted() || ExternalDebugInterruptsDisabled(el); masked = intdis || (mask_active && mask_set); // Check for a masked Physical SError pending that can be synchronized // by an Error synchronization event. if masked && IsSynchronizablePhysicalSErrorPending() then bits(32) syndrome = Zeros(32); syndrome<31> = '1'; // A syndrome<15:0> = AArch32.PhysicalSErrorSyndrome(); DISR = syndrome; ClearPendingPhysicalSError(); return;
// AArch32.EncodeAsyncErrorSyndrome() // ================================== // Return the encoding for specified ErrorState for an SError exception taken // to AArch32 state. bits(2) AArch32.EncodeAsyncErrorSyndrome(ErrorState errorstate) case errorstate of when ErrorState_UC return '00'; when ErrorState_UEU return '01'; when ErrorState_UEO return '10'; when ErrorState_UER return '11'; otherwise Unreachable();
// AArch32.PhysicalSErrorSyndrome() // ================================ // Generate SError syndrome. bits(16) AArch32.PhysicalSErrorSyndrome() bits(32) syndrome = Zeros(32); FaultRecord fault = GetPendingPhysicalSError(); if PSTATE.EL == EL2 then ErrorState errstate = PEErrorState(fault); syndrome<11:10> = AArch32.EncodeAsyncErrorSyndrome(errstate); // AET syndrome<9> = fault.extflag; // EA syndrome<5:0> = '010001'; // DFSC else boolean long_format = TTBCR.EAE == '1'; syndrome = AArch32.CommonFaultStatus(fault, long_format); return syndrome<15:0>;
// AArch32.vESBOperation() // ======================= // Perform the ESB operation for virtual SError interrupts executed in AArch32 state. // If FEAT_E3DSE is implemented and there is no unmasked virtual SError exception // pending, then AArch64.dESBOperation() is called to perform the AArch64 ESB operation // for a pending delegated SError exception. AArch32.vESBOperation() assert PSTATE.EL IN {EL0, EL1} && EL2Enabled(); // Check for EL2 using AArch64 state if !ELUsingAArch32(EL2) then AArch64.vESBOperation(); return; // If physical SError interrupts are routed to Hyp mode, and TGE is not set, // then a virtual SError interrupt might be pending vSEI_enabled = HCR.TGE == '0' && HCR.AMO == '1'; vSEI_pending = vSEI_enabled && HCR.VA == '1'; vintdis = Halted() || ExternalDebugInterruptsDisabled(EL1); vmasked = vintdis || PSTATE.A == '1'; // Check for a masked virtual SError pending if vSEI_pending && vmasked then bits(32) syndrome = Zeros(32); syndrome<31> = '1'; // A syndrome<15:14> = VDFSR<15:14>; // AET syndrome<12> = VDFSR<12>; // ExT syndrome<9> = TTBCR.EAE; // LPAE if TTBCR.EAE == '1' then // Long-descriptor format syndrome<5:0> = '010001'; // STATUS else // Short-descriptor format syndrome<10,3:0> = '10110'; // FS VDISR = syndrome; HCR.VA = '0'; // Clear pending virtual SError elsif IsFeatureImplemented(FEAT_E3DSE) && !ELUsingAArch32(EL3) then AArch64.dESBOperation(); return;
// AArch32.ResetGeneralRegisters() // =============================== AArch32.ResetGeneralRegisters() for i = 0 to 7 R[i] = bits(32) UNKNOWN; for i = 8 to 12 Rmode[i, M32_User] = bits(32) UNKNOWN; Rmode[i, M32_FIQ] = bits(32) UNKNOWN; if HaveEL(EL2) then Rmode[13, M32_Hyp] = bits(32) UNKNOWN; // No R14_hyp for i = 13 to 14 Rmode[i, M32_User] = bits(32) UNKNOWN; Rmode[i, M32_FIQ] = bits(32) UNKNOWN; Rmode[i, M32_IRQ] = bits(32) UNKNOWN; Rmode[i, M32_Svc] = bits(32) UNKNOWN; Rmode[i, M32_Abort] = bits(32) UNKNOWN; Rmode[i, M32_Undef] = bits(32) UNKNOWN; if HaveEL(EL3) then Rmode[i, M32_Monitor] = bits(32) UNKNOWN; return;
// AArch32.ResetSIMDFPRegisters() // ============================== AArch32.ResetSIMDFPRegisters() for i = 0 to 15 Q[i] = bits(128) UNKNOWN; return;
// AArch32.ResetSpecialRegisters() // =============================== AArch32.ResetSpecialRegisters() // AArch32 special registers SPSR_fiq<31:0> = bits(32) UNKNOWN; SPSR_irq<31:0> = bits(32) UNKNOWN; SPSR_svc<31:0> = bits(32) UNKNOWN; SPSR_abt<31:0> = bits(32) UNKNOWN; SPSR_und<31:0> = bits(32) UNKNOWN; if HaveEL(EL2) then SPSR_hyp = bits(32) UNKNOWN; ELR_hyp = bits(32) UNKNOWN; if HaveEL(EL3) then SPSR_mon = bits(32) UNKNOWN; // External debug special registers DLR = bits(32) UNKNOWN; DSPSR = bits(32) UNKNOWN; return;
// AArch32.ResetSystemRegisters() // ============================== AArch32.ResetSystemRegisters(boolean cold_reset);
// ALUExceptionReturn() // ==================== ALUExceptionReturn(bits(32) address) if PSTATE.EL == EL2 then UNDEFINED; elsif PSTATE.M IN {M32_User,M32_System} then Constraint c = ConstrainUnpredictable(Unpredictable_ALUEXCEPTIONRETURN); assert c IN {Constraint_UNDEF, Constraint_NOP}; case c of when Constraint_UNDEF UNDEFINED; when Constraint_NOP EndOfInstruction(); else AArch32.ExceptionReturn(address, SPSR_curr[]);
// ALUWritePC() // ============ ALUWritePC(bits(32) address) if CurrentInstrSet() == InstrSet_A32 then BXWritePC(address, BranchType_INDIR); else BranchWritePC(address, BranchType_INDIR);
// BXWritePC() // =========== BXWritePC(bits(32) address_in, BranchType branch_type) bits(32) address = address_in; if address<0> == '1' then SelectInstrSet(InstrSet_T32); address<0> = '0'; else SelectInstrSet(InstrSet_A32); // For branches to an unaligned PC counter in A32 state, the processor takes the branch // and does one of: // * Forces the address to be aligned // * Leaves the PC unaligned, meaning the target generates a PC Alignment fault. if address<1> == '1' && ConstrainUnpredictableBool(Unpredictable_A32FORCEALIGNPC) then address<1> = '0'; boolean branch_conditional = !(AArch32.CurrentCond() IN {'111x'}); BranchTo(address, branch_type, branch_conditional);
// BranchWritePC() // =============== BranchWritePC(bits(32) address_in, BranchType branch_type) bits(32) address = address_in; if CurrentInstrSet() == InstrSet_A32 then address<1:0> = '00'; else address<0> = '0'; boolean branch_conditional = !(AArch32.CurrentCond() IN {'111x'}); BranchTo(address, branch_type, branch_conditional);
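Note: an informal C model of the interworking rule in BXWritePC() (structure and names are this sketch's own): bit 0 of the target selects T32 and is then cleared, while an A32 target with bit 1 set is CONSTRAINED UNPREDICTABLE and may either be force-aligned, as modelled here, or left unaligned to fault.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: interworking branch target
 * handling as in BXWritePC(). */
struct cpu { uint32_t pc; int thumb; };

static void bx_write_pc(struct cpu *c, uint32_t address)
{
    if (address & 1u) {
        c->thumb = 1;          /* SelectInstrSet(InstrSet_T32) */
        address &= ~1u;
    } else {
        c->thumb = 0;          /* SelectInstrSet(InstrSet_A32) */
        address &= ~2u;        /* one permitted CONSTRAINED UNPREDICTABLE
                                  outcome: force alignment */
    }
    c->pc = address;
}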
// CBWritePC() // =========== // Takes a branch from a CBNZ/CBZ instruction. CBWritePC(bits(32) address_in) bits(32) address = address_in; assert CurrentInstrSet() == InstrSet_T32; address<0> = '0'; boolean branch_conditional = TRUE; BranchTo(address, BranchType_DIR, branch_conditional);
// D[] - non-assignment form // ========================= bits(64) D[integer n] assert n >= 0 && n <= 31; bits(128) vreg = V[n DIV 2, 128]; return Elem[vreg, n MOD 2, 64]; // D[] - assignment form // ===================== D[integer n] = bits(64) value assert n >= 0 && n <= 31; bits(128) vreg = V[n DIV 2, 128]; Elem[vreg, n MOD 2, 64] = value; V[n DIV 2, 128] = vreg; return;
// Din[] - non-assignment form // =========================== bits(64) Din[integer n] assert n >= 0 && n <= 31; return _Dclone[n];
// H[] - non-assignment form // ========================= bits(16) H[integer n] assert n >= 0 && n <= 31; return S[n]<15:0>; // H[] - assignment form // ===================== H[integer n] = bits(16) value S[n] = ZeroExtend(value, 32);
// LR - assignment form // ==================== LR = bits(32) value R[14] = value; return; // LR - non-assignment form // ======================== bits(32) LR return R[14];
// LoadWritePC() // ============= LoadWritePC(bits(32) address) BXWritePC(address, BranchType_INDIR);
// LookUpRIndex() // ============== integer LookUpRIndex(integer n, bits(5) mode) assert n >= 0 && n <= 14; integer result; case n of // Select index by mode: usr fiq irq svc abt und hyp when 8 result = RBankSelect(mode, 8, 24, 8, 8, 8, 8, 8); when 9 result = RBankSelect(mode, 9, 25, 9, 9, 9, 9, 9); when 10 result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10); when 11 result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11); when 12 result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12); when 13 result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15); when 14 result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14); otherwise result = n; return result;
// AArch32 program counter // PC32 - non-assignment form // ========================== bits(32) PC32 return R[15]; // This includes the offset from AArch32 state
// PCStoreValue() // ============== bits(32) PCStoreValue() // This function returns the PC value. On architecture versions before Armv7, it // is permitted to instead return PC+4, provided it does so consistently. It is // used only to describe A32 instructions, so it returns the address of the current // instruction plus 8 (normally) or 12 (when the alternative is permitted). return PC32;
// Q[] - non-assignment form // ========================= bits(128) Q[integer n] assert n >= 0 && n <= 15; return V[n, 128]; // Q[] - assignment form // ===================== Q[integer n] = bits(128) value assert n >= 0 && n <= 15; V[n, 128] = value; return;
// Qin[] - non-assignment form // =========================== bits(128) Qin[integer n] assert n >= 0 && n <= 15; return Din[2*n+1]:Din[2*n];
// R[] - assignment form // ===================== R[integer n] = bits(32) value Rmode[n, PSTATE.M] = value; return; // R[] - non-assignment form // ========================= bits(32) R[integer n] if n == 15 then offset = (if CurrentInstrSet() == InstrSet_A32 then 8 else 4); return _PC<31:0> + offset; else return Rmode[n, PSTATE.M];
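Note: reading R[15] through R[] returns the current instruction's address plus 8 in A32 state or plus 4 in T32 state, as the pseudocode above shows; for example, an A32 'ADD r0, pc, #0' at address 0x1000 writes 0x1008 to r0. An informal one-liner:

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: the PC read offset of R[15]. */
static uint32_t read_r15(uint32_t instr_addr, int thumb)
{
    return instr_addr + (thumb ? 4u : 8u);
}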
// RBankSelect() // ============= integer RBankSelect(bits(5) mode, integer usr, integer fiq, integer irq, integer svc, integer abt, integer und, integer hyp) integer result; case mode of when M32_User result = usr; // User mode when M32_FIQ result = fiq; // FIQ mode when M32_IRQ result = irq; // IRQ mode when M32_Svc result = svc; // Supervisor mode when M32_Abort result = abt; // Abort mode when M32_Hyp result = hyp; // Hyp mode when M32_Undef result = und; // Undefined mode when M32_System result = usr; // System mode uses User mode registers otherwise Unreachable(); // Monitor mode return result;
// Rmode[] - non-assignment form // ============================= bits(32) Rmode[integer n, bits(5) mode] assert n >= 0 && n <= 14; // Check for attempted use of Monitor mode in Non-secure state. if CurrentSecurityState() != SS_Secure then assert mode != M32_Monitor; assert !BadMode(mode); if mode == M32_Monitor then if n == 13 then return SP_mon; elsif n == 14 then return LR_mon; else return _R[n]<31:0>; else return _R[LookUpRIndex(n, mode)]<31:0>; // Rmode[] - assignment form // ========================= Rmode[integer n, bits(5) mode] = bits(32) value assert n >= 0 && n <= 14; // Check for attempted use of Monitor mode in Non-secure state. if CurrentSecurityState() != SS_Secure then assert mode != M32_Monitor; assert !BadMode(mode); if mode == M32_Monitor then if n == 13 then SP_mon = value; elsif n == 14 then LR_mon = value; else _R[n]<31:0> = value; else // It is CONSTRAINED UNPREDICTABLE whether the upper 32 bits of the X // register are unchanged or set to zero. This is also tested for on // exception entry, as this applies to all AArch32 registers. if HaveAArch64() && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then _R[LookUpRIndex(n, mode)] = ZeroExtend(value, 64); else _R[LookUpRIndex(n, mode)]<31:0> = value; return;
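Note: the R13/R14 banking performed by Rmode[] via LookUpRIndex()/RBankSelect() can be summarised by the informal index table below (this sketch's own names; Monitor mode is absent because its R13/R14 live in the separate SP_mon/LR_mon variables, and System mode uses the User indices).

/* Illustrative sketch, not Arm pseudocode: flat _R[] index selection for
 * banked registers, with index values taken from LookUpRIndex() above. */
enum m32 { USR, FIQ, IRQ, SVC, ABT, UND, HYP };

static int bank_index(int n, enum m32 mode)
{
    static const int r13[] = { 13, 29, 17, 19, 21, 23, 15 };
    static const int r14[] = { 14, 30, 16, 18, 20, 22, 14 };
    if (n == 13) return r13[mode];
    if (n == 14) return r14[mode];
    if (n >= 8 && n <= 12 && mode == FIQ) return n + 16; /* R8_fiq..R12_fiq */
    return n;                                            /* unbanked */
}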
// S[] - non-assignment form // ========================= bits(32) S[integer n] assert n >= 0 && n <= 31; bits(128) vreg = V[n DIV 4, 128]; return Elem[vreg, n MOD 4, 32]; // S[] - assignment form // ===================== S[integer n] = bits(32) value assert n >= 0 && n <= 31; bits(128) vreg = V[n DIV 4, 128]; Elem[vreg, n MOD 4, 32] = value; V[n DIV 4, 128] = vreg; return;
// _Dclone[] // ========= // Clone the 64-bit Advanced SIMD and VFP extension register bank for use as input to // instruction pseudocode, to avoid read-after-write for Advanced SIMD and VFP operations. array bits(64) _Dclone[0..31];
// AArch32.ExceptionReturn() // ========================= AArch32.ExceptionReturn(bits(32) new_pc_in, bits(32) spsr) bits(32) new_pc = new_pc_in; SynchronizeContext(); // Attempts to change to an illegal mode or state will invoke the Illegal Execution state // mechanism SetPSTATEFromPSR(spsr); ClearExclusiveLocal(ProcessorID()); SendEventLocal(); if PSTATE.IL == '1' then // If the exception return is illegal, PC[1:0] are UNKNOWN new_pc<1:0> = bits(2) UNKNOWN; else // LR[1:0] or LR[0] are treated as being 0, depending on the target instruction set state if PSTATE.T == '1' then new_pc<0> = '0'; // T32 else new_pc<1:0> = '00'; // A32 boolean branch_conditional = !(AArch32.CurrentCond() IN {'111x'}); BranchTo(new_pc, BranchType_ERET, branch_conditional); CheckExceptionCatch(FALSE); // Check for debug event on exception return
// AArch32.ExecutingCP10or11Instr() // ================================ boolean AArch32.ExecutingCP10or11Instr() instr = ThisInstr(); instr_set = CurrentInstrSet(); assert instr_set IN {InstrSet_A32, InstrSet_T32}; if instr_set == InstrSet_A32 then return ((instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> IN {'101x'}); else // InstrSet_T32 return (instr<31:28> IN {'111x'} && (instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> IN {'101x'});
// AArch32.ITAdvance() // =================== AArch32.ITAdvance() if PSTATE.IT<2:0> == '000' then PSTATE.IT = '00000000'; else PSTATE.IT<4:0> = LSL(PSTATE.IT<4:0>, 1); return;
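Note: an informal C equivalent of AArch32.ITAdvance() (the name is this sketch's own): the base condition in ITSTATE<7:5> is preserved while the low five bits shift left one place per instruction, draining to zero after the last instruction of the IT block.

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: advance the 8-bit ITSTATE. */
static uint8_t it_advance(uint8_t itstate)
{
    if ((itstate & 0x07u) == 0)          /* '000' in IT<2:0>: block ends */
        return 0;
    return (itstate & 0xE0u) | ((uint8_t)(itstate << 1) & 0x1Fu);
}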
// AArch32.SysRegRead() // ==================== // Read from a 32-bit AArch32 System register and write the register's contents to R[t]. AArch32.SysRegRead(integer cp_num, bits(32) instr, integer t);
// AArch32.SysRegRead64() // ====================== // Read from a 64-bit AArch32 System register and write the register's contents to R[t] and R[t2]. AArch32.SysRegRead64(integer cp_num, bits(32) instr, integer t, integer t2);
// AArch32.SysRegReadCanWriteAPSR() // ================================ // Determines whether the AArch32 System register read instruction can write to APSR flags. boolean AArch32.SysRegReadCanWriteAPSR(integer cp_num, bits(32) instr) assert UsingAArch32(); assert (cp_num IN {14,15}); assert cp_num == UInt(instr<11:8>); opc1 = UInt(instr<23:21>); opc2 = UInt(instr<7:5>); CRn = UInt(instr<19:16>); CRm = UInt(instr<3:0>); if cp_num == 14 && opc1 == 0 && CRn == 0 && CRm == 1 && opc2 == 0 then // DBGDSCRint return TRUE; return FALSE;
// AArch32.SysRegWrite() // ===================== // Read the contents of R[t] and write to a 32-bit AArch32 System register. AArch32.SysRegWrite(integer cp_num, bits(32) instr, integer t);
// AArch32.SysRegWrite64() // ======================= // Read the contents of R[t] and R[t2] and write to a 64-bit AArch32 System register. AArch32.SysRegWrite64(integer cp_num, bits(32) instr, integer t, integer t2);
// AArch32.SysRegWriteM() // ====================== // Read a value from a virtual address and write it to an AArch32 System register. AArch32.SysRegWriteM(integer cp_num, bits(32) instr, bits(32) address);
// AArch32.WriteMode() // =================== // Function for dealing with writes to PSTATE.M from AArch32 state only. // This ensures that PSTATE.EL and PSTATE.SP are always valid. AArch32.WriteMode(bits(5) mode) (valid,el) = ELFromM32(mode); assert valid; PSTATE.M = mode; PSTATE.EL = el; PSTATE.nRW = '1'; PSTATE.SP = (if mode IN {M32_User,M32_System} then '0' else '1'); return;
// AArch32.WriteModeByInstr() // ========================== // Function for dealing with writes to PSTATE.M from an AArch32 instruction, and ensuring that // illegal state changes are correctly flagged in PSTATE.IL. AArch32.WriteModeByInstr(bits(5) mode) (valid,el) = ELFromM32(mode); // 'valid' is set to FALSE if 'mode' is invalid for this implementation or the current value // of SCR.NS/SCR_EL3.NS. Additionally, it is illegal for an instruction to write 'mode' to // PSTATE.M if it would result in any of: // * A change to a mode that would cause entry to a higher Exception level. if UInt(el) > UInt(PSTATE.EL) then valid = FALSE; // * A change to or from Hyp mode. if (PSTATE.M == M32_Hyp || mode == M32_Hyp) && PSTATE.M != mode then valid = FALSE; // * When EL2 is implemented and the value of HCR.TGE is '1', a change to a Non-secure EL1 mode. if PSTATE.M == M32_Monitor && HaveEL(EL2) && el == EL1 && SCR.NS == '1' && HCR.TGE == '1' then valid = FALSE; if !valid then PSTATE.IL = '1'; else AArch32.WriteMode(mode);
// BadMode() // ========= boolean BadMode(bits(5) mode) // Return TRUE if 'mode' encodes a mode that is not valid for this implementation boolean valid; case mode of when M32_Monitor valid = HaveAArch32EL(EL3); when M32_Hyp valid = HaveAArch32EL(EL2); when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using // AArch64, then these modes are EL1 modes. // Therefore it is sufficient to test this implementation supports EL1 using AArch32. valid = HaveAArch32EL(EL1); when M32_User valid = HaveAArch32EL(EL0); otherwise valid = FALSE; // Passed an illegal mode value return !valid;
// BankedRegisterAccessValid() // =========================== // Checks for MRS (Banked register) or MSR (Banked register) accesses to registers // other than the SPSRs that are invalid. This includes ELR_hyp accesses. BankedRegisterAccessValid(bits(5) SYSm, bits(5) mode) case SYSm of when '000xx', '00100' // R8_usr to R12_usr if mode != M32_FIQ then UNPREDICTABLE; when '00101' // SP_usr if mode == M32_System then UNPREDICTABLE; when '00110' // LR_usr if mode IN {M32_Hyp,M32_System} then UNPREDICTABLE; when '010xx', '0110x', '01110' // R8_fiq to R12_fiq, SP_fiq, LR_fiq if mode == M32_FIQ then UNPREDICTABLE; when '1000x' // LR_irq, SP_irq if mode == M32_IRQ then UNPREDICTABLE; when '1001x' // LR_svc, SP_svc if mode == M32_Svc then UNPREDICTABLE; when '1010x' // LR_abt, SP_abt if mode == M32_Abort then UNPREDICTABLE; when '1011x' // LR_und, SP_und if mode == M32_Undef then UNPREDICTABLE; when '1110x' // LR_mon, SP_mon if (!HaveEL(EL3) || CurrentSecurityState() != SS_Secure || mode == M32_Monitor) then UNPREDICTABLE; when '11110' // ELR_hyp, only from Monitor or Hyp mode if !HaveEL(EL2) || !(mode IN {M32_Monitor,M32_Hyp}) then UNPREDICTABLE; when '11111' // SP_hyp, only from Monitor mode if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE; otherwise UNPREDICTABLE; return;
// CPSRWriteByInstr() // ================== // Update PSTATE.<N,Z,C,V,Q,GE,E,A,I,F,M> from a CPSR value written by an MSR instruction. CPSRWriteByInstr(bits(32) value, bits(4) bytemask) privileged = PSTATE.EL != EL0; // PSTATE.<A,I,F,M> are not writable at EL0 // Write PSTATE from 'value', ignoring bytes masked by 'bytemask' if bytemask<3> == '1' then PSTATE.<N,Z,C,V,Q> = value<31:27>; // Bits <26:24> are ignored if bytemask<2> == '1' then if IsFeatureImplemented(FEAT_SSBS) then PSTATE.SSBS = value<23>; if privileged then PSTATE.PAN = value<22>; if IsFeatureImplemented(FEAT_DIT) then PSTATE.DIT = value<21>; // Bit <20> is RES0 PSTATE.GE = value<19:16>; if bytemask<1> == '1' then // Bits <15:10> are RES0 PSTATE.E = value<9>; // PSTATE.E is writable at EL0 if privileged then PSTATE.A = value<8>; if bytemask<0> == '1' then if privileged then PSTATE.<I,F> = value<7:6>; // Bit <5> is RES0 // AArch32.WriteModeByInstr() sets PSTATE.IL to 1 if this is an illegal mode change. AArch32.WriteModeByInstr(value<4:0>); return;
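Note: the bytemask in CPSRWriteByInstr() selects which bytes of the 32-bit value take effect. As a very rough illustration only (this ignores the per-bit writability, RES0 bits, and privilege checks applied within each byte above):

#include <stdint.h>

/* Illustrative sketch, not Arm pseudocode: apply a 4-bit byte mask to a
 * 32-bit PSR-style write. */
static uint32_t apply_bytemask(uint32_t psr, uint32_t value, unsigned bytemask)
{
    for (unsigned i = 0; i < 4; i++)
        if (bytemask & (1u << i)) {
            uint32_t m = 0xFFu << (8 * i);
            psr = (psr & ~m) | (value & m);
        }
    return psr;
}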
// ConditionPassed() // ================= boolean ConditionPassed() return ConditionHolds(AArch32.CurrentCond());
// CurrentCond() // ============= bits(4) AArch32.CurrentCond();
// InITBlock() // =========== boolean InITBlock() if CurrentInstrSet() == InstrSet_T32 then return PSTATE.IT<3:0> != '0000'; else return FALSE;
// LastInITBlock() // =============== boolean LastInITBlock() return (PSTATE.IT<3:0> == '1000');
// SPSRWriteByInstr() // ================== SPSRWriteByInstr(bits(32) value, bits(4) bytemask) bits(32) new_spsr = SPSR_curr[]; if bytemask<3> == '1' then new_spsr<31:24> = value<31:24>; // N,Z,C,V,Q flags, IT[1:0],J bits if bytemask<2> == '1' then new_spsr<23:16> = value<23:16>; // IL bit, GE[3:0] flags if bytemask<1> == '1' then new_spsr<15:8> = value<15:8>; // IT[7:2] bits, E bit, A interrupt mask if bytemask<0> == '1' then new_spsr<7:0> = value<7:0>; // I,F interrupt masks, T bit, Mode bits SPSR_curr[] = new_spsr; // UNPREDICTABLE if User or System mode return;
// SPSRaccessValid() // ================= // Checks for MRS (Banked register) or MSR (Banked register) accesses to the SPSRs // that are UNPREDICTABLE SPSRaccessValid(bits(5) SYSm, bits(5) mode) case SYSm of when '01110' // SPSR_fiq if mode == M32_FIQ then UNPREDICTABLE; when '10000' // SPSR_irq if mode == M32_IRQ then UNPREDICTABLE; when '10010' // SPSR_svc if mode == M32_Svc then UNPREDICTABLE; when '10100' // SPSR_abt if mode == M32_Abort then UNPREDICTABLE; when '10110' // SPSR_und if mode == M32_Undef then UNPREDICTABLE; when '11100' // SPSR_mon if (!HaveEL(EL3) || mode == M32_Monitor || CurrentSecurityState() != SS_Secure) then UNPREDICTABLE; when '11110' // SPSR_hyp if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE; otherwise UNPREDICTABLE; return;
// SelectInstrSet() // ================ SelectInstrSet(InstrSet iset) assert CurrentInstrSet() IN {InstrSet_A32, InstrSet_T32}; assert iset IN {InstrSet_A32, InstrSet_T32}; PSTATE.T = if iset == InstrSet_A32 then '0' else '1'; return;
// AArch32.DTLBI_ALL() // =================== // Invalidate all data TLB entries for the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability domain. // Invalidation applies to all applicable stage 1 and stage 2 entries. AArch32.DTLBI_ALL(SecurityState security, Regime regime, Shareability shareability, TLBIMemAttr attr) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_DALL; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.level = TLBILevel_Any; r.attr = attr; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.DTLBI_ASID() // ==================== // Invalidate all data TLB stage 1 entries matching the indicated VMID (where regime supports) // and ASID in the parameter Rt in the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability domain. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.DTLBI_ASID(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_DASID; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = TLBILevel_Any; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.DTLBI_VA() // ================== // Invalidate by VA all stage 1 data TLB entries in the indicated shareability domain // matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime // with the indicated security state. // ASID, VA and related parameters are derived from Rt. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.DTLBI_VA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBILevel level, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_DVA; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = level; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; r.address = Zeros(32) : Rt<31:12> : Zeros(12); TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.ITLBI_ALL() // =================== // Invalidate all instruction TLB entries for the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability domain. // Invalidation applies to all applicable stage 1 and stage 2 entries. AArch32.ITLBI_ALL(SecurityState security, Regime regime, Shareability shareability, TLBIMemAttr attr) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_IALL; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.level = TLBILevel_Any; r.attr = attr; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.ITLBI_ASID() // ==================== // Invalidate all instruction TLB stage 1 entries matching the indicated VMID // (where regime supports) and ASID in the parameter Rt in the indicated translation // regime with the indicated security state for all TLBs within the indicated shareability domain. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.ITLBI_ASID(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_IASID; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = TLBILevel_Any; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.ITLBI_VA() // ================== // Invalidate by VA all stage 1 instruction TLB entries in the indicated shareability domain // matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime // with the indicated security state. // ASID, VA and related parameters are derived from Rt. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.ITLBI_VA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBILevel level, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_IVA; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = level; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; r.address = Zeros(32) : Rt<31:12> : Zeros(12); TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_ALL() // ================== // Invalidate all entries for the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability domain. // Invalidation applies to all applicable stage 1 and stage 2 entries. AArch32.TLBI_ALL(SecurityState security, Regime regime, Shareability shareability, TLBIMemAttr attr) assert PSTATE.EL IN {EL3, EL2}; TLBIRecord r; r.op = TLBIOp_ALL; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.level = TLBILevel_Any; r.attr = attr; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_ASID() // =================== // Invalidate all stage 1 entries matching the indicated VMID (where regime supports) // and ASID in the parameter Rt in the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability domain. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.TLBI_ASID(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_ASID; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = TLBILevel_Any; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_IPAS2() // ==================== // Invalidate by IPA all stage 2 only TLB entries in the indicated shareability // domain matching the indicated VMID in the indicated regime with the indicated security state. // Note: stage 1 and stage 2 combined entries are not in the scope of this operation. // IPA and related parameters are derived from Rt. AArch32.TLBI_IPAS2(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBILevel level, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2}; assert security == SS_NonSecure; TLBIRecord r; r.op = TLBIOp_IPAS2; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = level; r.attr = attr; r.address = Zeros(24) : Rt<27:0> : Zeros(12); r.ipaspace = PAS_NonSecure; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_VA() // ================= // Invalidate by VA all stage 1 TLB entries in the indicated shareability domain // matching the indicated VMID and ASID (where regime supports VMID, ASID) in the indicated regime // with the indicated security state. // ASID, VA and related parameters are derived from Rt. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.TLBI_VA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBILevel level, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_VA; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = level; r.attr = attr; r.asid = Zeros(8) : Rt<7:0>; r.address = Zeros(32) : Rt<31:12> : Zeros(12); TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
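For illustration, a Python sketch of the Rt field extraction shared by the VA-based TLBI helpers above (ASID from Rt<7:0>, page-aligned VA from Rt<31:12>); the helper name is hypothetical.

    def tlbi_va_fields(rt):
        asid = rt & 0xFF                # r.asid = Zeros(8) : Rt<7:0>
        address = rt & 0xFFFF_F000      # r.address = Zeros(32) : Rt<31:12> : Zeros(12)
        return asid, address

    assert tlbi_va_fields(0x12345E7F) == (0x7F, 0x1234_5000)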
// AArch32.TLBI_VAA() // ================== // Invalidate by VA all stage 1 TLB entries in the indicated shareability domain // matching the indicated VMID (where regime supports VMID) and all ASID in the indicated regime // with the indicated security state. // VA and related parameters are derived from Rt. // Note: stage 1 and stage 2 combined entries are in the scope of this operation. AArch32.TLBI_VAA(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBILevel level, TLBIMemAttr attr, bits(32) Rt) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_VAA; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.vmid = vmid; r.level = level; r.attr = attr; r.address = Zeros(32) : Rt<31:12> : Zeros(12); TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_VMALL() // ==================== // Invalidate all stage 1 entries for the indicated translation regime with the // indicated security state for all TLBs within the indicated shareability // domain that match the indicated VMID (where applicable). // Note: stage 1 and stage 2 combined entries are in the scope of this operation. // Note: stage 2 only entries are not in the scope of this operation. AArch32.TLBI_VMALL(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBIMemAttr attr) assert PSTATE.EL IN {EL3, EL2, EL1}; TLBIRecord r; r.op = TLBIOp_VMALL; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.level = TLBILevel_Any; r.vmid = vmid; r.attr = attr; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// AArch32.TLBI_VMALLS12() // ======================= // Invalidate all stage 1 and stage 2 entries for the indicated translation // regime with the indicated security state for all TLBs within the indicated // shareability domain that match the indicated VMID. AArch32.TLBI_VMALLS12(SecurityState security, Regime regime, bits(16) vmid, Shareability shareability, TLBIMemAttr attr) assert PSTATE.EL IN {EL3, EL2}; TLBIRecord r; r.op = TLBIOp_VMALLS12; r.from_aarch64 = FALSE; r.security = security; r.regime = regime; r.level = TLBILevel_Any; r.vmid = vmid; r.attr = attr; TLBI(r); if shareability != Shareability_NSH then Broadcast(shareability, r); return;
// Sat() // ===== bits(N) Sat(integer i, integer N, boolean unsigned) result = if unsigned then UnsignedSat(i, N) else SignedSat(i, N); return result;
// SignedSat() // =========== bits(N) SignedSat(integer i, integer N) (result, -) = SignedSatQ(i, N); return result;
// UnsignedSat() // ============= bits(N) UnsignedSat(integer i, integer N) (result, -) = UnsignedSatQ(i, N); return result;
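For illustration, a Python sketch of N-bit saturation equivalent to Sat()/SignedSat()/UnsignedSat(); unlike SignedSatQ()/UnsignedSatQ() it drops the saturation flag, and the helper name is hypothetical.

    def saturate(i, n, unsigned):
        # Clamp i to the representable range of an n-bit (un)signed integer.
        lo, hi = (0, (1 << n) - 1) if unsigned else (-(1 << (n - 1)), (1 << (n - 1)) - 1)
        return min(max(i, lo), hi)

    assert saturate(300, 8, unsigned=True) == 255      # UnsignedSat(300, 8)
    assert saturate(-200, 8, unsigned=False) == -128   # SignedSat(-200, 8)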
// AArch32.IC() // ============ // Perform Instruction Cache Operation. AArch32.IC(CacheOpScope opscope) regval = bits(32) UNKNOWN; AArch32.IC(regval, opscope); // AArch32.IC() // ============ // Perform Instruction Cache Operation. AArch32.IC(bits(32) regval, CacheOpScope opscope) CacheRecord cache; cache.acctype = AccessType_IC; cache.cachetype = CacheType_Instruction; cache.cacheop = CacheOp_Invalidate; cache.opscope = opscope; cache.security = SecurityStateAtEL(PSTATE.EL); if opscope IN {CacheOpScope_ALLU, CacheOpScope_ALLUIS} then if opscope == CacheOpScope_ALLUIS || (opscope == CacheOpScope_ALLU && PSTATE.EL == EL1 && EL2Enabled() && HCR.FB == '1') then cache.shareability = Shareability_ISH; else cache.shareability = Shareability_NSH; cache.regval = ZeroExtend(regval, 64); CACHE_OP(cache); else assert opscope == CacheOpScope_PoU; if EL2Enabled() then if PSTATE.EL IN {EL0, EL1} then cache.is_vmid_valid = TRUE; cache.vmid = VMID[]; else cache.is_vmid_valid = FALSE; else cache.is_vmid_valid = FALSE; if PSTATE.EL == EL0 then cache.is_asid_valid = TRUE; cache.asid = ASID[]; else cache.is_asid_valid = FALSE; need_translate = ICInstNeedsTranslation(opscope); cache.shareability = Shareability_NSH; cache.vaddress = ZeroExtend(regval, 64); cache.translated = need_translate; if !need_translate then cache.paddress = FullAddress UNKNOWN; CACHE_OP(cache); return; integer size = 0; boolean aligned = TRUE; AccessDescriptor accdesc = CreateAccDescIC(cache); AddressDescriptor memaddrdesc = AArch32.TranslateAddress(regval, accdesc, aligned, size); if IsFault(memaddrdesc) then AArch32.Abort(regval, memaddrdesc.fault); cache.paddress = memaddrdesc.paddress; CACHE_OP(cache); return;
// AArch32.RestrictPrediction() // ============================ // Clear all predictions in the context. AArch32.RestrictPrediction(bits(32) val, RestrictType restriction) ExecutionCntxt c; target_el = val<25:24>; // If the target EL is not implemented or the instruction is executed at an // EL lower than the specified level, the instruction is treated as a NOP. if !HaveEL(target_el) || UInt(target_el) > UInt(PSTATE.EL) then EndOfInstruction(); bit ns = val<26>; bit nse = bit UNKNOWN; ss = TargetSecurityState(ns, nse); c.security = ss; c.target_el = target_el; if EL2Enabled() then if PSTATE.EL IN {EL0, EL1} then c.is_vmid_valid = TRUE; c.all_vmid = FALSE; c.vmid = VMID[]; elsif target_el IN {EL0, EL1} then c.is_vmid_valid = TRUE; c.all_vmid = val<27> == '1'; c.vmid = ZeroExtend(val<23:16>, 16); // Only valid if val<27> == '0'; else c.is_vmid_valid = FALSE; else c.is_vmid_valid = FALSE; if PSTATE.EL == EL0 then c.is_asid_valid = TRUE; c.all_asid = FALSE; c.asid = ASID[]; elsif target_el == EL0 then c.is_asid_valid = TRUE; c.all_asid = val<8> == '1'; c.asid = ZeroExtend(val<7:0>, 16); // Only valid if val<8> == '0'; else c.is_asid_valid = FALSE; c.restriction = restriction; RESTRICT_PREDICTIONS(c);
// AArch32.DefaultTEXDecode() // ========================== // Apply short-descriptor format memory region attributes, without TEX remap MemoryAttributes AArch32.DefaultTEXDecode(bits(3) TEX_in, bit C_in, bit B_in, bit s) MemoryAttributes memattrs; bits(3) TEX = TEX_in; bit C = C_in; bit B = B_in; // Reserved values map to allocated values if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then bits(5) texcb; (-, texcb) = ConstrainUnpredictableBits(Unpredictable_RESTEXCB, 5); TEX = texcb<4:2>; C = texcb<1>; B = texcb<0>; // Distinction between Inner Shareable and Outer Shareable is not supported in this format // A memory region is either Non-shareable or Outer Shareable case TEX:C:B of when '00000' // Device-nGnRnE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; when '00001', '01000' // Device-nGnRE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRE; memattrs.shareability = Shareability_OSH; when '00010' // Write-through Read allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WT; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WT; memattrs.outer.hints = MemHint_RA; memattrs.shareability = if s == '1' then Shareability_OSH else Shareability_NSH; when '00011' // Write-back Read allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RA; memattrs.shareability = if s == '1' then Shareability_OSH else Shareability_NSH; when '00100' // Non-cacheable memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_NC; memattrs.outer.attrs = MemAttr_NC; memattrs.shareability = Shareability_OSH; when '00110' memattrs = MemoryAttributes IMPLEMENTATION_DEFINED; when '00111' // Write-back Read and Write allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RWA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RWA; memattrs.shareability = if s == '1' then Shareability_OSH else Shareability_NSH; when '1xxxx' // Cacheable, TEX<1:0> = Outer attrs, {C,B} = Inner attrs memattrs.memtype = MemType_Normal; memattrs.inner = DecodeSDFAttr(C:B); memattrs.outer = DecodeSDFAttr(TEX<1:0>); if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then memattrs.shareability = Shareability_OSH; else memattrs.shareability = if s == '1' then Shareability_OSH else Shareability_NSH; otherwise // Reserved, handled above Unreachable(); // The Transient hint is not supported in this format memattrs.inner.transient = FALSE; memattrs.outer.transient = FALSE; memattrs.tags = MemTag_Untagged; if memattrs.inner.attrs == MemAttr_WB && memattrs.outer.attrs == MemAttr_WB then memattrs.xs = '0'; else memattrs.xs = '1'; return memattrs;
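For illustration, a Python sketch covering a few rows of the TEX:C:B table above; the remaining encodings are elided and the helper name is hypothetical.

    def default_tex_decode(tex, c, b, s):
        key = (tex << 2) | (c << 1) | b      # TEX:C:B as a 5-bit value
        if key == 0b00000:
            return ('Device-nGnRnE', 'OSH')
        if key in (0b00001, 0b01000):
            return ('Device-nGnRE', 'OSH')
        if key == 0b00010:
            return ('Normal WT-RA', 'OSH' if s else 'NSH')   # S bit selects shareability
        if key == 0b00011:
            return ('Normal WB-RA', 'OSH' if s else 'NSH')
        if key == 0b00100:
            return ('Normal NC', 'OSH')
        raise NotImplementedError('remaining encodings elided')

    assert default_tex_decode(0b000, 1, 1, s=1) == ('Normal WB-RA', 'OSH')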
// AArch32.MAIRAttr() // ================== // Retrieve the memory attribute encoding indexed in the given MAIR bits(8) AArch32.MAIRAttr(integer index, MAIRType mair) assert (index < 8); return Elem[mair, index, 8];
// AArch32.RemappedTEXDecode() // =========================== // Apply short-descriptor format memory region attributes, with TEX remap MemoryAttributes AArch32.RemappedTEXDecode(Regime regime, bits(3) TEX, bit C, bit B, bit s) MemoryAttributes memattrs; PRRR_Type prrr; NMRR_Type nmrr; region = UInt(TEX<0>:C:B); // TEX<2:1> are ignored in this mapping scheme if region == 6 then return MemoryAttributes IMPLEMENTATION_DEFINED; if regime == Regime_EL30 then prrr = PRRR_S; nmrr = NMRR_S; elsif HaveAArch32EL(EL3) then prrr = PRRR_NS; nmrr = NMRR_NS; else prrr = PRRR; nmrr = NMRR; constant integer base = 2 * region; attrfield = Elem[prrr, region, 2]; if attrfield == '11' then // Reserved, maps to allocated value (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESPRRR, 2); case attrfield of when '00' // Device-nGnRnE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; when '01' // Device-nGnRE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRE; memattrs.shareability = Shareability_OSH; when '10' NSn = if s == '0' then prrr.NS0 else prrr.NS1; NOSm = prrr<region+24> AND NSn; IRn = nmrr<base+1:base>; ORn = nmrr<base+17:base+16>; memattrs.memtype = MemType_Normal; memattrs.inner = DecodeSDFAttr(IRn); memattrs.outer = DecodeSDFAttr(ORn); if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then memattrs.shareability = Shareability_OSH; else bits(2) sh = NSn:NOSm; memattrs.shareability = DecodeShareability(sh); when '11' Unreachable(); // The Transient hint is not supported in this format memattrs.inner.transient = FALSE; memattrs.outer.transient = FALSE; memattrs.tags = MemTag_Untagged; if memattrs.inner.attrs == MemAttr_WB && memattrs.outer.attrs == MemAttr_WB then memattrs.xs = '0'; else memattrs.xs = '1'; return memattrs;
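For illustration, a Python sketch of the TEX-remap register indexing above, treating PRRR and NMRR as flat integers: region TEX<0>:C:B selects a 2-bit field at bit 2*region of PRRR, and the inner/outer cacheability fields sit at the same offset and at offset+16 in NMRR. The helper name and arguments are hypothetical.

    def tex_remap_fields(prrr, nmrr, tex0, c, b):
        region = (tex0 << 2) | (c << 1) | b      # TEX<0>:C:B, TEX<2:1> ignored
        base = 2 * region
        attrfield = (prrr >> base) & 0b11        # memory type for this region
        irn = (nmrr >> base) & 0b11              # inner cacheability
        orn = (nmrr >> (base + 16)) & 0b11       # outer cacheability
        return region, attrfield, irn, orn

    # region 5 -> PRRR bits <11:10>, NMRR bits <11:10> and <27:26>
    assert tex_remap_fields(0b10 << 10, 0b01 << 26 | 0b11 << 10, 1, 0, 1) == (5, 0b10, 0b11, 0b01)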
// AArch32.CheckBreakpoint() // ========================= // Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32 // translation regime, when either debug exceptions are enabled, or halting debug is enabled // and halting is allowed. FaultRecord AArch32.CheckBreakpoint(FaultRecord fault_in, bits(32) vaddress, AccessDescriptor accdesc, integer size) assert ELUsingAArch32(S1TranslationRegime()); assert size IN {2,4}; FaultRecord fault = fault_in; match = FALSE; mismatch = FALSE; for i = 0 to NumBreakpointsImplemented() - 1 (match_i, mismatch_i) = AArch32.BreakpointMatch(i, vaddress, accdesc, size); match = match || match_i; mismatch = mismatch || mismatch_i; if match && HaltOnBreakpointOrWatchpoint() then reason = DebugHalt_Breakpoint; Halt(reason); elsif (match || mismatch) then fault.statuscode = Fault_Debug; fault.debugmoe = DebugException_Breakpoint; return fault;
// AArch32.CheckDebug() // ==================== // Called on each access to check for a debug exception or entry to Debug state. FaultRecord AArch32.CheckDebug(bits(32) vaddress, AccessDescriptor accdesc, integer size) FaultRecord fault = NoFault(accdesc); boolean d_side = (IsDataAccess(accdesc.acctype) || accdesc.acctype == AccessType_DC); boolean i_side = (accdesc.acctype == AccessType_IFETCH); generate_exception = AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1'; halt = HaltOnBreakpointOrWatchpoint(); // Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture vector_catch_first = ConstrainUnpredictableBool(Unpredictable_BPVECTORCATCHPRI); if i_side && vector_catch_first && generate_exception then fault = AArch32.CheckVectorCatch(fault, vaddress, size); if fault.statuscode == Fault_None && (generate_exception || halt) then if d_side then fault = AArch32.CheckWatchpoint(fault, vaddress, accdesc, size); elsif i_side then fault = AArch32.CheckBreakpoint(fault, vaddress, accdesc, size); if fault.statuscode == Fault_None && i_side && !vector_catch_first && generate_exception then return AArch32.CheckVectorCatch(fault, vaddress, size); return fault;
// AArch32.CheckVectorCatch() // ========================== // Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32 // translation regime, when debug exceptions are enabled. FaultRecord AArch32.CheckVectorCatch(FaultRecord fault_in, bits(32) vaddress, integer size) assert ELUsingAArch32(S1TranslationRegime()); FaultRecord fault = fault_in; match = AArch32.VCRMatch(vaddress); if size == 4 && !match && AArch32.VCRMatch(vaddress + 2) then match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF); if match then fault.statuscode = Fault_Debug; fault.debugmoe = DebugException_VectorCatch; return fault;
// AArch32.CheckWatchpoint() // ========================= // Called before accessing the memory location of "size" bytes at "address", // when either debug exceptions are enabled for the access, or halting debug // is enabled and halting is allowed. FaultRecord AArch32.CheckWatchpoint(FaultRecord fault_in, bits(32) vaddress, AccessDescriptor accdesc, integer size) assert ELUsingAArch32(S1TranslationRegime()); FaultRecord fault = fault_in; if accdesc.acctype == AccessType_DC then if accdesc.cacheop != CacheOp_Invalidate then return fault; elsif !(boolean IMPLEMENTATION_DEFINED "DCIMVAC generates watchpoint") then return fault; elsif !IsDataAccess(accdesc.acctype) then return fault; match = FALSE; for i = 0 to NumWatchpointsImplemented() - 1 if AArch32.WatchpointMatch(i, vaddress, size, accdesc) then match = TRUE; if match && HaltOnBreakpointOrWatchpoint() then reason = DebugHalt_Watchpoint; EDWAR = ZeroExtend(vaddress, 64); Halt(reason); elsif match then fault.statuscode = Fault_Debug; fault.debugmoe = DebugException_Watchpoint; return fault;
// AArch32.IPAIsOutOfRange() // ========================= // Check intermediate physical address bits not resolved by translation are ZERO boolean AArch32.IPAIsOutOfRange(S2TTWParams walkparams, bits(40) ipa) // Input Address size constant integer iasize = AArch32.S2IASize(walkparams.t0sz); return iasize < 40 && !IsZero(ipa<39:iasize>);
// AArch32.S1HasAlignmentFault() // ============================= // Returns whether stage 1 output fails alignment requirement on data accesses // to Device memory boolean AArch32.S1HasAlignmentFault(AccessDescriptor accdesc, boolean aligned, bit ntlsmd, MemoryAttributes memattrs) if accdesc.acctype == AccessType_IFETCH then return FALSE; elsif accdesc.a32lsmd && ntlsmd == '0' then return memattrs.memtype == MemType_Device && memattrs.device != DeviceType_GRE; elsif accdesc.acctype == AccessType_DCZero then return memattrs.memtype == MemType_Device; else return memattrs.memtype == MemType_Device && !aligned;
// AArch32.S1LDHasPermissionsFault() // ================================= // Returns whether an access using stage 1 long-descriptor translation // violates permissions of target memory boolean AArch32.S1LDHasPermissionsFault(Regime regime, S1TTWParams walkparams, Permissions perms, MemType memtype, PASpace paspace, AccessDescriptor accdesc) bit r, w, x; bit pr, pw; bit ur, uw; bit xn; if HasUnprivileged(regime) then // Apply leaf permissions case perms.ap<2:1> of when '00' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '01' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL when '10' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '11' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL // Apply hierarchical permissions case perms.ap_table of when '00' (pr,pw,ur,uw) = ( pr, pw, ur, uw); // No effect when '01' (pr,pw,ur,uw) = ( pr, pw,'0','0'); // Privileged access when '10' (pr,pw,ur,uw) = ( pr,'0', ur,'0'); // Read-only when '11' (pr,pw,ur,uw) = ( pr,'0','0','0'); // Read-only, privileged access xn = perms.xn OR perms.xn_table; pxn = perms.pxn OR perms.pxn_table; ux = ur AND NOT(xn OR (uw AND walkparams.wxn)); px = pr AND NOT(xn OR pxn OR (pw AND walkparams.wxn) OR (uw AND walkparams.uwxn)); if IsFeatureImplemented(FEAT_PAN) && accdesc.pan then pan = PSTATE.PAN AND (ur OR uw); pr = pr AND NOT(pan); pw = pw AND NOT(pan); (r,w,x) = if accdesc.el == EL0 then (ur,uw,ux) else (pr,pw,px); // Prevent execution from Non-secure space by PE in Secure state if SIF is set if accdesc.ss == SS_Secure && paspace == PAS_NonSecure then x = x AND NOT(walkparams.sif); else // Apply leaf permissions case perms.ap<2> of when '0' (r,w) = ('1','1'); // No effect when '1' (r,w) = ('1','0'); // Read-only // Apply hierarchical permissions case perms.ap_table<1> of when '0' (r,w) = ( r, w ); // No effect when '1' (r,w) = ( r, '0'); // Read-only xn = perms.xn OR perms.xn_table; x = NOT(xn OR (w AND walkparams.wxn)); if accdesc.acctype == AccessType_IFETCH then constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); if constraint == Constraint_FAULT && memtype == MemType_Device then return TRUE; else return x == '0'; elsif accdesc.acctype IN {AccessType_IC, AccessType_DC} then return FALSE; elsif accdesc.write then return w == '0'; else return r == '0';
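For illustration, a Python sketch of the long-descriptor leaf AP<2:1> decode and the hierarchical APTable combination tabulated above; names are hypothetical and the PAN/WXN/SIF refinements are omitted.

    AP21_DECODE = {
        0b00: (1, 1, 0, 0),   # (pr, pw, ur, uw): R/W at PL1 only
        0b01: (1, 1, 1, 1),   # R/W at any PL
        0b10: (1, 0, 0, 0),   # RO at PL1 only
        0b11: (1, 0, 1, 0),   # RO at any PL
    }

    def apply_ap_table(perms, ap_table):
        # Hierarchical APTable bits only ever remove permissions.
        pr, pw, ur, uw = perms
        if ap_table & 0b01:    # APTable<0>: no unprivileged access
            ur, uw = 0, 0
        if ap_table & 0b10:    # APTable<1>: no write access
            pw, uw = 0, 0
        return (pr, pw, ur, uw)

    assert apply_ap_table(AP21_DECODE[0b01], 0b01) == (1, 1, 0, 0)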
// AArch32.S1SDHasPermissionsFault() // ================================= // Returns whether an access using stage 1 short-descriptor translation // violates permissions of target memory boolean AArch32.S1SDHasPermissionsFault(Regime regime, Permissions perms_in, MemType memtype, PASpace paspace, AccessDescriptor accdesc) Permissions perms = perms_in; bit pr, pw; bit ur, uw; SCTLR_Type sctlr; if regime == Regime_EL30 then sctlr = SCTLR_S; elsif HaveAArch32EL(EL3) then sctlr = SCTLR_NS; else sctlr = SCTLR; if sctlr.AFE == '0' then // Map Reserved encoding '100' if perms.ap == '100' then perms.ap = bits(3) IMPLEMENTATION_DEFINED "Reserved short descriptor AP encoding"; case perms.ap of when '000' (pr,pw,ur,uw) = ('0','0','0','0'); // No access when '001' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '010' (pr,pw,ur,uw) = ('1','1','1','0'); // R/W at PL1, RO at PL0 when '011' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL // '100' is reserved when '101' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '110' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL (deprecated) when '111' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL else // Simplified access permissions model case perms.ap<2:1> of when '00' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '01' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL when '10' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '11' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL ux = ur AND NOT(perms.xn OR (uw AND sctlr.WXN)); px = pr AND NOT(perms.xn OR perms.pxn OR (pw AND sctlr.WXN) OR (uw AND sctlr.UWXN)); if IsFeatureImplemented(FEAT_PAN) && accdesc.pan then pan = PSTATE.PAN AND (ur OR uw); pr = pr AND NOT(pan); pw = pw AND NOT(pan); (r,w,x) = if accdesc.el == EL0 then (ur,uw,ux) else (pr,pw,px); // Prevent execution from Non-secure space by PE in Secure state if SIF is set if accdesc.ss == SS_Secure && paspace == PAS_NonSecure then x = x AND NOT(if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF); if accdesc.acctype == AccessType_IFETCH then if (memtype == MemType_Device && ConstrainUnpredictable(Unpredictable_INSTRDEVICE) == Constraint_FAULT) then return TRUE; else return x == '0'; elsif accdesc.acctype IN {AccessType_IC, AccessType_DC} then return FALSE; elsif accdesc.write then return w == '0'; else return r == '0';
// AArch32.S2HasAlignmentFault() // ============================= // Returns whether stage 2 output fails alignment requirement on data accesses // to Device memory boolean AArch32.S2HasAlignmentFault(AccessDescriptor accdesc, boolean aligned, MemoryAttributes memattrs) if accdesc.acctype == AccessType_IFETCH then return FALSE; elsif accdesc.acctype == AccessType_DCZero then return memattrs.memtype == MemType_Device; else return memattrs.memtype == MemType_Device && !aligned;
// AArch32.S2HasPermissionsFault() // =============================== // Returns whether stage 2 access violates permissions of target memory boolean AArch32.S2HasPermissionsFault(S2TTWParams walkparams, Permissions perms, MemType memtype, AccessDescriptor accdesc) bit px; bit ux; r = perms.s2ap<0>; w = perms.s2ap<1>; bit x; if IsFeatureImplemented(FEAT_XNX) then case perms.s2xn:perms.s2xnx of when '00' (px, ux) = ( r, r ); when '01' (px, ux) = ('0', r ); when '10' (px, ux) = ('0', '0'); when '11' (px, ux) = ( r, '0'); x = if accdesc.el == EL0 then ux else px; else x = r AND NOT(perms.s2xn); if accdesc.acctype == AccessType_TTW then return (walkparams.ptw == '1' && memtype == MemType_Device) || r == '0'; elsif accdesc.acctype == AccessType_IFETCH then constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); return (constraint == Constraint_FAULT && memtype == MemType_Device) || x == '0'; elsif accdesc.acctype IN {AccessType_IC, AccessType_DC} then return FALSE; elsif accdesc.write then return w == '0'; else return r == '0';
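For illustration, a Python sketch of the stage 2 execute-never decode above, including the FEAT_XNX S2XN:S2XNX table; names are hypothetical.

    def s2_exec_perms(r, s2xn, s2xnx, have_xnx):
        # Returns (px, ux): execute permission at EL1 and at EL0.
        if not have_xnx:
            px = ux = r & (1 - s2xn)          # x = r AND NOT(S2XN)
            return px, ux
        table = {
            (0, 0): (r, r),                   # executable at EL1 and EL0
            (0, 1): (0, r),                   # EL0 only
            (1, 0): (0, 0),                   # never executable
            (1, 1): (r, 0),                   # EL1 only
        }
        return table[(s2xn, s2xnx)]

    assert s2_exec_perms(1, 1, 1, have_xnx=True) == (1, 0)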
// AArch32.S2InconsistentSL() // ========================== // Detect inconsistent configuration of stage 2 T0SZ and SL fields boolean AArch32.S2InconsistentSL(S2TTWParams walkparams) startlevel = AArch32.S2StartLevel(walkparams.sl0); levels = FINAL_LEVEL - startlevel; granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; // Input address size must be large enough to be resolved from the start level sl_min_iasize = ( levels * stride // Bits resolved by table walk, except initial level + granulebits // Bits directly mapped to output address + 1); // At least 1 more bit to be decoded by initial level // Can accommodate 1 more stride in the level + concatenation of up to 2^4 tables sl_max_iasize = sl_min_iasize + (stride-1) + 4; // Configured Input Address size iasize = AArch32.S2IASize(walkparams.t0sz); return iasize < sl_min_iasize || iasize > sl_max_iasize;
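For illustration, a Python sketch of these bounds for the 4KB granule used by the AArch32 stage 2 walk (granulebits = 12, stride = 9, FINAL_LEVEL = 3); the helper name is hypothetical.

    def s2_sl_iasize_bounds(startlevel, granulebits=12, final_level=3):
        stride = granulebits - 3
        levels = final_level - startlevel
        sl_min = levels * stride + granulebits + 1    # walk bits + granule bits + 1
        sl_max = sl_min + (stride - 1) + 4            # extra stride + 2^4 concatenated tables
        return sl_min, sl_max

    # A walk starting at level 2 requires a 22..34 bit input address.
    assert s2_sl_iasize_bounds(2) == (22, 34)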
// AArch32.VAIsOutOfRange() // ======================== // Check virtual address bits not resolved by translation are identical // and of accepted value boolean AArch32.VAIsOutOfRange(Regime regime, S1TTWParams walkparams, bits(32) va) if regime == Regime_EL2 then // Input Address size constant integer iasize = AArch32.S1IASize(walkparams.t0sz); return walkparams.t0sz != '000' && !IsZero(va<31:iasize>); elsif walkparams.t1sz != '000' && walkparams.t0sz != '000' then // Lower range Input Address size constant integer lo_iasize = AArch32.S1IASize(walkparams.t0sz); // Upper range Input Address size constant integer up_iasize = AArch32.S1IASize(walkparams.t1sz); return !IsZero(va<31:lo_iasize>) && !IsOnes(va<31:up_iasize>); else return FALSE;
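For illustration, a Python sketch of the EL1&0 long-descriptor check above: with both T0SZ and T1SZ nonzero, a VA is out of range when it falls in the hole between the bottom 2^(32-T0SZ) bytes and the top 2^(32-T1SZ) bytes. The helper name is hypothetical.

    def va_is_out_of_range(va, t0sz, t1sz):
        if t0sz == 0 or t1sz == 0:
            return False                    # one region covers the whole 32-bit space
        lo_iasize = 32 - t0sz
        up_iasize = 32 - t1sz
        in_lower = (va >> lo_iasize) == 0                            # IsZero(va<31:lo_iasize>)
        in_upper = (va >> up_iasize) == (1 << (32 - up_iasize)) - 1  # IsOnes(va<31:up_iasize>)
        return not in_lower and not in_upper

    assert not va_is_out_of_range(0x8000_0000, t0sz=1, t1sz=1)   # top of the upper range
    assert va_is_out_of_range(0x4000_0000, t0sz=2, t1sz=1)       # falls in the hole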
// AArch32.GetS1TLBContext() // ========================= // Gather translation context for accesses with VA to match against TLB entries TLBContext AArch32.GetS1TLBContext(Regime regime, SecurityState ss, bits(32) va) TLBContext tlbcontext; case regime of when Regime_EL2 tlbcontext = AArch32.TLBContextEL2(va); when Regime_EL10 tlbcontext = AArch32.TLBContextEL10(ss, va); when Regime_EL30 tlbcontext = AArch32.TLBContextEL30(va); tlbcontext.includes_s1 = TRUE; // The following may be amended for EL1&0 Regime if caching of stage 2 is successful tlbcontext.includes_s2 = FALSE; return tlbcontext;
// AArch32.GetS2TLBContext() // ========================= // Gather translation context for accesses with IPA to match against TLB entries TLBContext AArch32.GetS2TLBContext(FullAddress ipa) assert ipa.paspace == PAS_NonSecure; TLBContext tlbcontext; tlbcontext.ss = SS_NonSecure; tlbcontext.regime = Regime_EL10; tlbcontext.ipaspace = ipa.paspace; tlbcontext.vmid = ZeroExtend(VTTBR.VMID, 16); tlbcontext.tg = TGx_4KB; tlbcontext.includes_s1 = FALSE; tlbcontext.includes_s2 = TRUE; tlbcontext.ia = ZeroExtend(ipa.address, 64); tlbcontext.cnp = if IsFeatureImplemented(FEAT_TTCNP) then VTTBR.CnP else '0'; return tlbcontext;
// AArch32.TLBContextEL10() // ======================== // Gather translation context for accesses under EL10 regime // (PL1&0 when EL3 is using AArch64) to match against TLB entries TLBContext AArch32.TLBContextEL10(SecurityState ss, bits(32) va) TLBContext tlbcontext; TTBCR_Type ttbcr; TTBR0_Type ttbr0; TTBR1_Type ttbr1; CONTEXTIDR_Type contextidr; if HaveAArch32EL(EL3) then ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; contextidr = CONTEXTIDR_NS; else ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; contextidr = CONTEXTIDR; tlbcontext.ss = ss; tlbcontext.regime = Regime_EL10; if AArch32.EL2Enabled(ss) then tlbcontext.vmid = ZeroExtend(VTTBR.VMID, 16); if ttbcr.EAE == '1' then tlbcontext.asid = ZeroExtend(if ttbcr.A1 == '0' then ttbr0.ASID else ttbr1.ASID, 16); else tlbcontext.asid = ZeroExtend(contextidr.ASID, 16); tlbcontext.tg = TGx_4KB; tlbcontext.ia = ZeroExtend(va, 64); if IsFeatureImplemented(FEAT_TTCNP) && ttbcr.EAE == '1' then if AArch32.GetVARange(va, ttbcr.T0SZ, ttbcr.T1SZ) == VARange_LOWER then tlbcontext.cnp = ttbr0.CnP; else tlbcontext.cnp = ttbr1.CnP; else tlbcontext.cnp = '0'; return tlbcontext;
// AArch32.TLBContextEL2() // ======================= // Gather translation context for accesses under EL2 regime to match against TLB entries TLBContext AArch32.TLBContextEL2(bits(32) va) TLBContext tlbcontext; tlbcontext.ss = SS_NonSecure; tlbcontext.regime = Regime_EL2; tlbcontext.ia = ZeroExtend(va, 64); tlbcontext.tg = TGx_4KB; tlbcontext.cnp = if IsFeatureImplemented(FEAT_TTCNP) then HTTBR.CnP else '0'; return tlbcontext;
// AArch32.TLBContextEL30() // ======================== // Gather translation context for accesses under EL30 regime // (PL1&0 in Secure state when EL3 is using AArch32) to match against TLB entries TLBContext AArch32.TLBContextEL30(bits(32) va) TLBContext tlbcontext; tlbcontext.ss = SS_Secure; tlbcontext.regime = Regime_EL30; if TTBCR_S.EAE == '1' then tlbcontext.asid = ZeroExtend(if TTBCR_S.A1 == '0' then TTBR0_S.ASID else TTBR1_S.ASID, 16); else tlbcontext.asid = ZeroExtend(CONTEXTIDR_S.ASID, 16); tlbcontext.tg = TGx_4KB; tlbcontext.ia = ZeroExtend(va, 64); if IsFeatureImplemented(FEAT_TTCNP) && TTBCR_S.EAE == '1' then if AArch32.GetVARange(va, TTBCR_S.T0SZ, TTBCR_S.T1SZ) == VARange_LOWER then tlbcontext.cnp = TTBR0_S.CnP; else tlbcontext.cnp = TTBR1_S.CnP; else tlbcontext.cnp = '0'; return tlbcontext;
// AArch32.EL2Enabled() // ==================== // Returns whether EL2 is enabled for the given Security State boolean AArch32.EL2Enabled(SecurityState ss) if ss == SS_Secure then if !(HaveEL(EL2) && IsFeatureImplemented(FEAT_SEL2)) then return FALSE; elsif HaveEL(EL3) then return SCR_EL3.EEL2 == '1'; else return boolean IMPLEMENTATION_DEFINED "Secure-only implementation"; else return HaveEL(EL2);
// AArch32.FullTranslate() // ======================= // Perform address translation as specified by VMSA-A32 AddressDescriptor AArch32.FullTranslate(bits(32) va, AccessDescriptor accdesc, boolean aligned) // Prepare fault fields in case a fault is detected FaultRecord fault = NoFault(accdesc); Regime regime = TranslationRegime(accdesc.el); // First Stage Translation AddressDescriptor ipa; if regime == Regime_EL2 || TTBCR.EAE == '1' then (fault, ipa) = AArch32.S1TranslateLD(fault, regime, va, aligned, accdesc); else (fault, ipa, -) = AArch32.S1TranslateSD(fault, regime, va, aligned, accdesc); if fault.statuscode != Fault_None then return CreateFaultyAddressDescriptor(ZeroExtend(va, 64), fault); if regime == Regime_EL10 && EL2Enabled() then ipa.vaddress = ZeroExtend(va, 64); AddressDescriptor pa; (fault, pa) = AArch32.S2Translate(fault, ipa, aligned, accdesc); if fault.statuscode != Fault_None then return CreateFaultyAddressDescriptor(ZeroExtend(va, 64), fault); else return pa; else return ipa;
// AArch32.OutputDomain() // ====================== // Determine the domain the translated output address bits(2) AArch32.OutputDomain(Regime regime, bits(4) domain) bits(2) Dn; if regime == Regime_EL30 then Dn = Elem[DACR_S, UInt(domain), 2]; elsif HaveAArch32EL(EL3) then Dn = Elem[DACR_NS, UInt(domain), 2]; else Dn = Elem[DACR, UInt(domain), 2]; if Dn == '10' then // Reserved value maps to an allocated value (-, Dn) = ConstrainUnpredictableBits(Unpredictable_RESDACR, 2); return Dn;
// AArch32.S1DisabledOutput() // ========================== // Flat map the VA to IPA/PA, depending on the regime, assigning default memory attributes (FaultRecord, AddressDescriptor) AArch32.S1DisabledOutput(FaultRecord fault_in, Regime regime, bits(32) va, boolean aligned, AccessDescriptor accdesc) FaultRecord fault = fault_in; // No memory page is guarded when stage 1 address translation is disabled SetInGuardedPage(FALSE); MemoryAttributes memattrs; bit default_cacheable; if regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) then if ELStateUsingAArch32(EL2, accdesc.ss == SS_Secure) then default_cacheable = HCR.DC; else default_cacheable = HCR_EL2.DC; else default_cacheable = '0'; if default_cacheable == '1' then // Use default cacheable settings memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RWA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RWA; memattrs.shareability = Shareability_NSH; if (EL2Enabled() && !ELStateUsingAArch32(EL2, accdesc.ss == SS_Secure) && IsFeatureImplemented(FEAT_MTE2) && HCR_EL2.DCT == '1') then memattrs.tags = MemTag_AllocationTagged; else memattrs.tags = MemTag_Untagged; memattrs.xs = '0'; elsif accdesc.acctype == AccessType_IFETCH then memattrs.memtype = MemType_Normal; memattrs.shareability = Shareability_OSH; memattrs.tags = MemTag_Untagged; if AArch32.S1ICacheEnabled(regime) then memattrs.inner.attrs = MemAttr_WT; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WT; memattrs.outer.hints = MemHint_RA; else memattrs.inner.attrs = MemAttr_NC; memattrs.outer.attrs = MemAttr_NC; memattrs.xs = '1'; else // Treat memory region as Device memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; memattrs.tags = MemTag_Untagged; memattrs.xs = '1'; bit ntlsmd; if IsFeatureImplemented(FEAT_LSMAOC) then case regime of when Regime_EL30 ntlsmd = SCTLR_S.nTLSMD; when Regime_EL2 ntlsmd = HSCTLR.nTLSMD; when Regime_EL10 ntlsmd = if HaveAArch32EL(EL3) then SCTLR_NS.nTLSMD else SCTLR.nTLSMD; else ntlsmd = '1'; if AArch32.S1HasAlignmentFault(accdesc, aligned, ntlsmd, memattrs) then fault.statuscode = Fault_Alignment; return (fault, AddressDescriptor UNKNOWN); FullAddress oa; oa.address = ZeroExtend(va, 56); oa.paspace = if accdesc.ss == SS_Secure then PAS_Secure else PAS_NonSecure; ipa = CreateAddressDescriptor(ZeroExtend(va, 64), oa, memattrs); return (fault, ipa);
// AArch32.S1Enabled() // =================== // Returns whether stage 1 translation is enabled for the active translation regime boolean AArch32.S1Enabled(Regime regime, SecurityState ss) if regime == Regime_EL2 then return HSCTLR.M == '1'; elsif regime == Regime_EL30 then return SCTLR_S.M == '1'; elsif !AArch32.EL2Enabled(ss) then return (if HaveAArch32EL(EL3) then SCTLR_NS.M else SCTLR.M) == '1'; elsif ELStateUsingAArch32(EL2, ss == SS_Secure) then return HCR.<TGE,DC> == '00' && (if HaveAArch32EL(EL3) then SCTLR_NS.M else SCTLR.M) == '1'; else return EL2Enabled() && HCR_EL2.<TGE,DC> == '00' && SCTLR.M == '1';
// AArch32.S1TranslateLD() // ======================= // Perform a stage 1 translation using long-descriptor format mapping VA to IPA/PA // depending on the regime (FaultRecord, AddressDescriptor) AArch32.S1TranslateLD(FaultRecord fault_in, Regime regime, bits(32) va, boolean aligned, AccessDescriptor accdesc) FaultRecord fault = fault_in; if !AArch32.S1Enabled(regime, accdesc.ss) then return AArch32.S1DisabledOutput(fault, regime, va, aligned, accdesc); walkparams = AArch32.GetS1TTWParams(regime, va); if AArch32.VAIsOutOfRange(regime, walkparams, va) then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, AddressDescriptor UNKNOWN); TTWState walkstate; (fault, walkstate) = AArch32.S1WalkLD(fault, regime, walkparams, accdesc, va); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); SetInGuardedPage(FALSE); // AArch32-VMSA does not guard any pages if AArch32.S1HasAlignmentFault(accdesc, aligned, walkparams.ntlsmd, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif AArch32.S1LDHasPermissionsFault(regime, walkparams, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, accdesc) then fault.statuscode = Fault_Permission; if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); MemoryAttributes memattrs; if ((accdesc.acctype == AccessType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || !AArch32.S1ICacheEnabled(regime))) || (accdesc.acctype != AccessType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && !AArch32.S1DCacheEnabled(regime))) then // Treat memory attributes as Normal Non-Cacheable memattrs = NormalNCMemAttr(); memattrs.xs = walkstate.memattrs.xs; else memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) && (if ELStateUsingAArch32(EL2, accdesc.ss==SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then memattrs.shareability = walkstate.memattrs.shareability; else memattrs.shareability = EffectiveShareability(memattrs); // Output Address oa = StageOA(ZeroExtend(va, 64), walkparams.d128, walkparams.tgx, walkstate); ipa = CreateAddressDescriptor(ZeroExtend(va, 64), oa, memattrs); return (fault, ipa);
// AArch32.S1TranslateSD() // ======================= // Perform a stage 1 translation using short-descriptor format mapping VA to IPA/PA // depending on the regime (FaultRecord, AddressDescriptor, SDFType) AArch32.S1TranslateSD(FaultRecord fault_in, Regime regime, bits(32) va, boolean aligned, AccessDescriptor accdesc) FaultRecord fault = fault_in; if !AArch32.S1Enabled(regime, accdesc.ss) then AddressDescriptor ipa; (fault, ipa) = AArch32.S1DisabledOutput(fault, regime, va, aligned, accdesc); return (fault, ipa, SDFType UNKNOWN); TTWState walkstate; (fault, walkstate) = AArch32.S1WalkSD(fault, regime, accdesc, va); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN, SDFType UNKNOWN); domain = AArch32.OutputDomain(regime, walkstate.domain); SetInGuardedPage(FALSE); // AArch32-VMSA does not guard any pages bit ntlsmd; if IsFeatureImplemented(FEAT_LSMAOC) then case regime of when Regime_EL30 ntlsmd = SCTLR_S.nTLSMD; when Regime_EL10 ntlsmd = if HaveAArch32EL(EL3) then SCTLR_NS.nTLSMD else SCTLR.nTLSMD; else ntlsmd = '1'; if AArch32.S1HasAlignmentFault(accdesc, aligned, ntlsmd, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif (!(accdesc.acctype IN {AccessType_IC, AccessType_DC}) && domain == Domain_NoAccess) then fault.statuscode = Fault_Domain; elsif domain == Domain_Client then if AArch32.S1SDHasPermissionsFault(regime, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, accdesc) then fault.statuscode = Fault_Permission; if fault.statuscode != Fault_None then fault.domain = walkstate.domain; return (fault, AddressDescriptor UNKNOWN, walkstate.sdftype); MemoryAttributes memattrs; if ((accdesc.acctype == AccessType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || !AArch32.S1ICacheEnabled(regime))) || (accdesc.acctype != AccessType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && !AArch32.S1DCacheEnabled(regime))) then // Treat memory attributes as Normal Non-Cacheable memattrs = NormalNCMemAttr(); memattrs.xs = walkstate.memattrs.xs; else memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) && (if ELStateUsingAArch32(EL2, accdesc.ss==SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then memattrs.shareability = walkstate.memattrs.shareability; else memattrs.shareability = EffectiveShareability(memattrs); // Output Address oa = AArch32.SDStageOA(walkstate.baseaddress, va, walkstate.sdftype); ipa = CreateAddressDescriptor(ZeroExtend(va, 64), oa, memattrs); return (fault, ipa, walkstate.sdftype);
// AArch32.S2Translate() // ===================== // Perform a stage 2 translation mapping an IPA to a PA (FaultRecord, AddressDescriptor) AArch32.S2Translate(FaultRecord fault_in, AddressDescriptor ipa, boolean aligned, AccessDescriptor accdesc) FaultRecord fault = fault_in; assert IsZero(ipa.paddress.address<55:40>); if !ELStateUsingAArch32(EL2, accdesc.ss == SS_Secure) then s1aarch64 = FALSE; return AArch64.S2Translate(fault, ipa, s1aarch64, aligned, accdesc); // Prepare fault fields in case a fault is detected fault.statuscode = Fault_None; fault.secondstage = TRUE; fault.s2fs1walk = accdesc.acctype == AccessType_TTW; fault.ipaddress = ipa.paddress; walkparams = AArch32.GetS2TTWParams(); if walkparams.vm == '0' then // Stage 2 is disabled return (fault, ipa); if AArch32.IPAIsOutOfRange(walkparams, ipa.paddress.address<39:0>) then fault.statuscode = Fault_Translation; fault.level = 1; return (fault, AddressDescriptor UNKNOWN); TTWState walkstate; (fault, walkstate) = AArch32.S2Walk(fault, walkparams, accdesc, ipa); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); if AArch32.S2HasAlignmentFault(accdesc, aligned, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif AArch32.S2HasPermissionsFault(walkparams, walkstate.permissions, walkstate.memattrs.memtype, accdesc) then fault.statuscode = Fault_Permission; MemoryAttributes s2_memattrs; if ((accdesc.acctype == AccessType_TTW && walkstate.memattrs.memtype == MemType_Device) || (accdesc.acctype == AccessType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || HCR2.ID == '1')) || (accdesc.acctype != AccessType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && HCR2.CD == '1')) then // Treat memory attributes as Normal Non-Cacheable s2_memattrs = NormalNCMemAttr(); s2_memattrs.xs = walkstate.memattrs.xs; else s2_memattrs = walkstate.memattrs; s2aarch64 = FALSE; memattrs = S2CombineS1MemAttrs(ipa.memattrs, s2_memattrs, s2aarch64); ipa_64 = ZeroExtend(ipa.paddress.address<39:0>, 64); // Output Address oa = StageOA(ipa_64, walkparams.d128, walkparams.tgx, walkstate); pa = CreateAddressDescriptor(ipa.vaddress, oa, memattrs); return (fault, pa);
// AArch32.SDStageOA() // =================== // Given the final walk state of a short-descriptor translation walk, // map the untranslated input address bits to the base output address FullAddress AArch32.SDStageOA(FullAddress baseaddress, bits(32) va, SDFType sdftype) constant integer tsize = SDFSize(sdftype); // Output Address FullAddress oa; oa.address = baseaddress.address<55:tsize> : va<tsize-1:0>; oa.paspace = baseaddress.paspace; return oa;
// AArch32.TranslateAddress() // ========================== // Main entry point for translating an address AddressDescriptor AArch32.TranslateAddress(bits(32) va, AccessDescriptor accdesc, boolean aligned, integer size) Regime regime = TranslationRegime(PSTATE.EL); if !RegimeUsingAArch32(regime) then return AArch64.TranslateAddress(ZeroExtend(va, 64), accdesc, aligned, size); AddressDescriptor result = AArch32.FullTranslate(va, accdesc, aligned); if !IsFault(result) then result.fault = AArch32.CheckDebug(va, accdesc, size); // Update virtual address for abort functions result.vaddress = ZeroExtend(va, 64); return result;
// SDFSize() // ========= // Returns the short-descriptor format translation granule size integer SDFSize(SDFType sdftype) case sdftype of when SDFType_SmallPage return 12; when SDFType_LargePage return 16; when SDFType_Section return 20; when SDFType_Supersection return 24; otherwise Unreachable();
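For illustration, a Python sketch combining SDFSize() with the output-address composition of AArch32.SDStageOA() above: the low 'tsize' VA bits pass through and the remainder comes from the descriptor's base address. Names are hypothetical.

    SDF_SIZE = {'SmallPage': 12, 'LargePage': 16, 'Section': 20, 'Supersection': 24}

    def sd_stage_oa(baseaddress, va, sdftype):
        tsize = SDF_SIZE[sdftype]
        mask = (1 << tsize) - 1
        # oa = baseaddress<55:tsize> : va<tsize-1:0>
        return (baseaddress & ~mask) | (va & mask)

    # A 1MB Section: the base supplies bits <55:20>, the VA supplies bits <19:0>.
    assert sd_stage_oa(0x1230_0000, 0xABCD_EF12, 'Section') == 0x123D_EF12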
// AArch32.DecodeDescriptorTypeLD() // ================================ // Determine whether the long-descriptor is a page, block or table DescriptorType AArch32.DecodeDescriptorTypeLD(bits(64) descriptor, integer level) if descriptor<1:0> == '11' && level == FINAL_LEVEL then return DescriptorType_Leaf; elsif descriptor<1:0> == '11' then return DescriptorType_Table; elsif descriptor<1:0> == '01' && level != FINAL_LEVEL then return DescriptorType_Leaf; else return DescriptorType_Invalid;
// AArch32.DecodeDescriptorTypeSD() // ================================ // Determine the type of the short-descriptor SDFType AArch32.DecodeDescriptorTypeSD(bits(32) descriptor, integer level) if level == 1 && descriptor<1:0> == '01' then return SDFType_Table; elsif level == 1 && descriptor<18,1> == '01' then return SDFType_Section; elsif level == 1 && descriptor<18,1> == '11' then return SDFType_Supersection; elsif level == 2 && descriptor<1:0> == '01' then return SDFType_LargePage; elsif level == 2 && descriptor<1:0> IN {'1x'} then return SDFType_SmallPage; else return SDFType_Invalid;
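For illustration, a Python sketch of the short-descriptor type decode above; the helper name is hypothetical.

    def decode_descriptor_type_sd(descriptor, level):
        bits10 = descriptor & 0b11
        bit18 = (descriptor >> 18) & 1
        if level == 1:
            if bits10 == 0b01:
                return 'Table'
            if bits10 & 0b10:                 # descriptor<1> == '1'
                return 'Supersection' if bit18 else 'Section'
        elif level == 2:
            if bits10 == 0b01:
                return 'LargePage'
            if bits10 & 0b10:                 # descriptor<1:0> IN {'1x'}
                return 'SmallPage'
        return 'Invalid'

    assert decode_descriptor_type_sd(0b10, 1) == 'Section'
    assert decode_descriptor_type_sd((1 << 18) | 0b10, 1) == 'Supersection'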
// AArch32.S1IASize() // ================== // Retrieve the number of bits containing the input address for stage 1 translation integer AArch32.S1IASize(bits(3) txsz) return 32 - UInt(txsz);
// AArch32.S1WalkLD() // ================== // Traverse stage 1 translation tables in long format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S1WalkLD(FaultRecord fault_in, Regime regime, S1TTWParams walkparams, AccessDescriptor accdesc, bits(32) va) FaultRecord fault = fault_in; bits(3) txsz; bits(64) ttbr; bit epd; VARange varange; if regime == Regime_EL2 then ttbr = HTTBR; txsz = walkparams.t0sz; varange = VARange_LOWER; else varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); bits(64) ttbr0; bits(64) ttbr1; TTBCR_Type ttbcr; if regime == Regime_EL30 then ttbcr = TTBCR_S; ttbr0 = TTBR0_S; ttbr1 = TTBR1_S; elsif HaveAArch32EL(EL3) then ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; else ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; assert ttbcr.EAE == '1'; if varange == VARange_LOWER then txsz = walkparams.t0sz; ttbr = ttbr0; epd = ttbcr.EPD0; else txsz = walkparams.t1sz; ttbr = ttbr1; epd = ttbcr.EPD1; if regime != Regime_EL2 && epd == '1' then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); // Input Address size iasize = AArch32.S1IASize(txsz); granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; startlevel = FINAL_LEVEL - (((iasize-1) - granulebits) DIV stride); levels = FINAL_LEVEL - startlevel; if !IsZero(ttbr<47:40>) then fault.statuscode = Fault_AddressSize; fault.level = 0; return (fault, TTWState UNKNOWN); FullAddress baseaddress; constant integer baselsb = (iasize - (levels*stride + granulebits)) + 3; baseaddress.paspace = if accdesc.ss == SS_Secure then PAS_Secure else PAS_NonSecure; baseaddress.address = ZeroExtend(ttbr<39:baselsb>:Zeros(baselsb), 56); TTWState walkstate; walkstate.baseaddress = baseaddress; walkstate.level = startlevel; walkstate.istable = TRUE; // In regimes that support global and non-global translations, translation // table entries from lookup levels other than the final level of lookup // are treated as being non-global walkstate.nG = if HasUnprivileged(regime) then '1' else '0'; walkstate.memattrs = WalkMemAttrs(walkparams.sh, walkparams.irgn, walkparams.orgn); walkstate.permissions.ap_table = '00'; walkstate.permissions.xn_table = '0'; walkstate.permissions.pxn_table = '0'; bits(64) descriptor; AddressDescriptor walkaddress; walkaddress.vaddress = ZeroExtend(va, 64); if !AArch32.S1DCacheEnabled(regime) then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) && (if ELStateUsingAArch32(EL2, accdesc.ss==SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then walkaddress.memattrs.shareability = walkstate.memattrs.shareability; else walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); DescriptorType desctype; integer msb_residue = iasize - 1; repeat fault.level = walkstate.level; constant integer indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; constant integer indexmsb = msb_residue; bits(40) index = ZeroExtend(va<indexmsb:indexlsb>:'000', 40); walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index, 56); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; boolean toplevel = walkstate.level == startlevel; 
AccessDescriptor walkaccess = CreateAccDescS1TTW(toplevel, varange, accdesc); // If there are two stages of translation, then the first stage table walk addresses // are themselves subject to translation if regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) then s2aligned = TRUE; (s2fault, s2walkaddress) = AArch32.S2Translate(fault, walkaddress, s2aligned, walkaccess); // Check for a fault on the stage 2 walk if s2fault.statuscode != Fault_None then return (s2fault, TTWState UNKNOWN); (fault, descriptor) = FetchDescriptor(walkparams.ee, s2walkaddress, walkaccess, fault, 64); else (fault, descriptor) = FetchDescriptor(walkparams.ee, walkaddress, walkaccess, fault, 64); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); desctype = AArch32.DecodeDescriptorTypeLD(descriptor, walkstate.level); case desctype of when DescriptorType_Table if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); walkstate.baseaddress.address = ZeroExtend(descriptor<39:12>:Zeros(12), 56); if walkstate.baseaddress.paspace == PAS_Secure && descriptor<63> == '1' then walkstate.baseaddress.paspace = PAS_NonSecure; if walkparams.hpd == '0' then walkstate.permissions.xn_table = (walkstate.permissions.xn_table OR descriptor<60>); walkstate.permissions.ap_table = (walkstate.permissions.ap_table OR descriptor<62:61>); walkstate.permissions.pxn_table = (walkstate.permissions.pxn_table OR descriptor<59>); walkstate.level = walkstate.level + 1; msb_residue = indexlsb - 1; when DescriptorType_Invalid fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when DescriptorType_Leaf walkstate.istable = FALSE; until desctype == DescriptorType_Leaf; // Check the output address is inside the supported range if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); // Check the access flag if descriptor<10> == '0' then fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); walkstate.permissions.xn = descriptor<54>; walkstate.permissions.pxn = descriptor<53>; walkstate.permissions.ap = descriptor<7:6>:'1'; walkstate.contiguous = descriptor<52>; if regime == Regime_EL2 then // All EL2 regime accesses are treated as Global walkstate.nG = '0'; elsif accdesc.ss == SS_Secure && walkstate.baseaddress.paspace == PAS_NonSecure then // When a PE is using the Long-descriptor translation table format, // and is in Secure state, a translation must be treated as non-global, // regardless of the value of the nG bit, // if NSTable is set to 1 at any level of the translation table walk. walkstate.nG = '1'; else walkstate.nG = descriptor<11>; constant integer indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; walkstate.baseaddress.address = ZeroExtend(descriptor<39:indexlsb>:Zeros(indexlsb), 56); if walkstate.baseaddress.paspace == PAS_Secure && descriptor<5> == '1' then walkstate.baseaddress.paspace = PAS_NonSecure; memattr = descriptor<4:2>; sh = descriptor<9:8>; attr = AArch32.MAIRAttr(UInt(memattr), walkparams.mair); s1aarch64 = FALSE; walkstate.memattrs = S1DecodeMemAttrs(attr, sh, s1aarch64, walkparams); return (fault, walkstate);
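For illustration, a Python sketch of the start-level formula used in the walk above, for the 4KB granule that AArch32 long-descriptor translation always uses (granulebits = 12, stride = 9, FINAL_LEVEL = 3); the helper name is hypothetical.

    def ld_start_level(iasize, granulebits=12, final_level=3):
        stride = granulebits - 3
        # startlevel = FINAL_LEVEL - (((iasize-1) - granulebits) DIV stride)
        return final_level - ((iasize - 1 - granulebits) // stride)

    assert ld_start_level(32) == 1   # T0SZ = 0: 32-bit IA, walk starts at level 1
    assert ld_start_level(30) == 2   # T0SZ = 2: 30-bit IA, walk starts at level 2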
// AArch32.S1WalkSD() // ================== // Traverse stage 1 translation tables in short format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S1WalkSD(FaultRecord fault_in, Regime regime, AccessDescriptor accdesc, bits(32) va) FaultRecord fault = fault_in; SCTLR_Type sctlr; TTBCR_Type ttbcr; TTBR0_Type ttbr0; TTBR1_Type ttbr1; // Determine correct translation control registers to use. if regime == Regime_EL30 then sctlr = SCTLR_S; ttbcr = TTBCR_S; ttbr0 = TTBR0_S; ttbr1 = TTBR1_S; elsif HaveAArch32EL(EL3) then sctlr = SCTLR_NS; ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; else sctlr = SCTLR; ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; assert ttbcr.EAE == '0'; ee = sctlr.EE; afe = sctlr.AFE; tre = sctlr.TRE; constant integer ttbcr_n = UInt(ttbcr.N); constant boolean use_ttbr0 = IsZero(va<31:(32-ttbcr_n)>); VARange varange = (if ttbcr_n == 0 || use_ttbr0 then VARange_LOWER else VARange_UPPER); constant integer n = if varange == VARange_LOWER then ttbcr_n else 0; bits(32) ttb; bits(1) pd; bits(2) irgn; bits(2) rgn; bits(1) s; bits(1) nos; if varange == VARange_LOWER then ttb = ttbr0.TTB0:Zeros(7); pd = ttbcr.PD0; irgn = ttbr0.IRGN; rgn = ttbr0.RGN; s = ttbr0.S; nos = ttbr0.NOS; else ttb = ttbr1.TTB1:Zeros(7); pd = ttbcr.PD1; irgn = ttbr1.IRGN; rgn = ttbr1.RGN; s = ttbr1.S; nos = ttbr1.NOS; // Check if Translation table walk disabled for translations with this Base register. if pd == '1' then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); FullAddress baseaddress; baseaddress.paspace = if accdesc.ss == SS_Secure then PAS_Secure else PAS_NonSecure; baseaddress.address = ZeroExtend(ttb<31:14-n>:Zeros(14-n), 56); constant integer startlevel = 1; TTWState walkstate; walkstate.baseaddress = baseaddress; // In regimes that support global and non-global translations, translation // table entries from lookup levels other than the final level of lookup // are treated as being non-global. Translations in Short-Descriptor Format // always support global & non-global translations. 
walkstate.nG = '1'; walkstate.memattrs = WalkMemAttrs(s:nos, irgn, rgn); walkstate.level = startlevel; walkstate.istable = TRUE; bits(4) domain; bits(32) descriptor; AddressDescriptor walkaddress; walkaddress.vaddress = ZeroExtend(va, 64); if !AArch32.S1DCacheEnabled(regime) then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) && (if ELStateUsingAArch32(EL2, accdesc.ss==SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then walkaddress.memattrs.shareability = walkstate.memattrs.shareability; else walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); bit nG; bit ns; bit pxn; bits(3) ap; bits(3) tex; bit c; bit b; bit xn; repeat fault.level = walkstate.level; bits(32) index; if walkstate.level == 1 then index = ZeroExtend(va<31-n:20>:'00', 32); else index = ZeroExtend(va<19:12>:'00', 32); walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index, 56); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; boolean toplevel = walkstate.level == startlevel; AccessDescriptor walkaccess = CreateAccDescS1TTW(toplevel, varange, accdesc); if regime == Regime_EL10 && AArch32.EL2Enabled(accdesc.ss) then s2aligned = TRUE; (s2fault, s2walkaddress) = AArch32.S2Translate(fault, walkaddress, s2aligned, walkaccess); if s2fault.statuscode != Fault_None then return (s2fault, TTWState UNKNOWN); (fault, descriptor) = FetchDescriptor(ee, s2walkaddress, walkaccess, fault, 32); else (fault, descriptor) = FetchDescriptor(ee, walkaddress, walkaccess, fault, 32); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); walkstate.sdftype = AArch32.DecodeDescriptorTypeSD(descriptor, walkstate.level); case walkstate.sdftype of when SDFType_Invalid fault.domain = domain; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when SDFType_Table domain = descriptor<8:5>; ns = descriptor<3>; pxn = descriptor<2>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:10>:Zeros(10), 56); walkstate.level = 2; when SDFType_SmallPage nG = descriptor<11>; s = descriptor<10>; ap = descriptor<9,5:4>; tex = descriptor<8:6>; c = descriptor<3>; b = descriptor<2>; xn = descriptor<0>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:12>:Zeros(12), 56); walkstate.istable = FALSE; when SDFType_LargePage xn = descriptor<15>; tex = descriptor<14:12>; nG = descriptor<11>; s = descriptor<10>; ap = descriptor<9,5:4>; c = descriptor<3>; b = descriptor<2>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:16>:Zeros(16), 56); walkstate.istable = FALSE; when SDFType_Section ns = descriptor<19>; nG = descriptor<17>; s = descriptor<16>; ap = descriptor<15,11:10>; tex = descriptor<14:12>; domain = descriptor<8:5>; xn = descriptor<4>; c = descriptor<3>; b = descriptor<2>; pxn = descriptor<0>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:20>:Zeros(20), 56); walkstate.istable = FALSE; when SDFType_Supersection ns = descriptor<19>; nG = descriptor<17>; s = descriptor<16>; ap = descriptor<15,11:10>; tex = descriptor<14:12>; xn = descriptor<4>; c = descriptor<3>; b = descriptor<2>; pxn = descriptor<0>; domain = '0000'; walkstate.baseaddress.address = 
ZeroExtend(descriptor<8:5,23:20,31:24>:Zeros(24), 56); walkstate.istable = FALSE; until walkstate.sdftype != SDFType_Table; if afe == '1' && ap<0> == '0' then fault.domain = domain; fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); // Decode the TEX, C, B and S bits to produce target memory attributes if tre == '1' then walkstate.memattrs = AArch32.RemappedTEXDecode(regime, tex, c, b, s); elsif RemapRegsHaveResetValues() then walkstate.memattrs = AArch32.DefaultTEXDecode(tex, c, b, s); else walkstate.memattrs = MemoryAttributes IMPLEMENTATION_DEFINED; walkstate.permissions.ap = ap; walkstate.permissions.xn = xn; walkstate.permissions.pxn = pxn; walkstate.domain = domain; walkstate.nG = nG; if accdesc.ss == SS_Secure && ns == '0' then walkstate.baseaddress.paspace = PAS_Secure; else walkstate.baseaddress.paspace = PAS_NonSecure; return (fault, walkstate);
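The supersection case assembles its output address from three non-contiguous descriptor fields. A minimal C sketch of that concatenation, descriptor<8:5> : descriptor<23:20> : descriptor<31:24> : Zeros(24), assuming a bare uint32_t descriptor (helper name hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t supersection_pa(uint32_t desc) {
    uint64_t pa_39_36 = (desc >> 5)  & 0xFu;   // descriptor<8:5>   -> PA<39:36>
    uint64_t pa_35_32 = (desc >> 20) & 0xFu;   // descriptor<23:20> -> PA<35:32>
    uint64_t pa_31_24 = (desc >> 24) & 0xFFu;  // descriptor<31:24> -> PA<31:24>
    return (pa_39_36 << 36) | (pa_35_32 << 32) | (pa_31_24 << 24);
}

int main(void) {
    // Descriptor encoding PA<39:36>=0x5, PA<35:32>=0xA, PA<31:24>=0xC3
    uint32_t desc = (0x5u << 5) | (0xAu << 20) | (0xC3u << 24);
    printf("supersection base = 0x%010" PRIx64 "\n", supersection_pa(desc));
    return 0;
}

This prints 0x5ac3000000, the 16MB-aligned base of the supersection.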
// AArch32.S2IASize() // ================== // Retrieve the number of bits containing the input address for stage 2 translation integer AArch32.S2IASize(bits(4) t0sz) return 32 - SInt(t0sz);
// AArch32.S2StartLevel() // ====================== // Determine the initial lookup level when performing a stage 2 translation // table walk integer AArch32.S2StartLevel(bits(2) sl0) return 2 - UInt(sl0);
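Because VTCR.T0SZ is interpreted as a signed 4-bit value (SInt), the stage 2 input address can exceed 32 bits. A small C model of AArch32.S2IASize() and AArch32.S2StartLevel(), with hypothetical helper names:

#include <stdio.h>

static int s2_iasize(unsigned t0sz4) {
    int s = (int)(t0sz4 & 0xFu);
    if (s & 0x8) s -= 16;                 // sign-extend the 4-bit field (SInt)
    return 32 - s;
}

static int s2_startlevel(unsigned sl0) {
    return 2 - (int)(sl0 & 0x3u);         // SL0=0 starts at level 2, SL0=1 at level 1
}

int main(void) {
    printf("T0SZ=0b1000 -> iasize=%d\n", s2_iasize(0x8u));  // -8 -> 40-bit IPA
    printf("SL0=1 -> start level %d\n", s2_startlevel(1u));
    return 0;
}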
// AArch32.S2Walk() // ================ // Traverse stage 2 translation tables in long format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S2Walk(FaultRecord fault_in, S2TTWParams walkparams, AccessDescriptor accdesc, AddressDescriptor ipa) FaultRecord fault = fault_in; if walkparams.sl0 IN {'1x'} || AArch32.S2InconsistentSL(walkparams) then fault.statuscode = Fault_Translation; fault.level = 1; return (fault, TTWState UNKNOWN); // Input Address size iasize = AArch32.S2IASize(walkparams.t0sz); startlevel = AArch32.S2StartLevel(walkparams.sl0); levels = FINAL_LEVEL - startlevel; granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; if !IsZero(VTTBR<47:40>) then fault.statuscode = Fault_AddressSize; fault.level = 0; return (fault, TTWState UNKNOWN); FullAddress baseaddress; constant integer baselsb = (iasize - (levels*stride + granulebits)) + 3; baseaddress.paspace = PAS_NonSecure; baseaddress.address = ZeroExtend(VTTBR<39:baselsb>:Zeros(baselsb), 56); TTWState walkstate; walkstate.baseaddress = baseaddress; walkstate.level = startlevel; walkstate.istable = TRUE; walkstate.memattrs = WalkMemAttrs(walkparams.sh, walkparams.irgn, walkparams.orgn); bits(64) descriptor; AccessDescriptor walkaccess = CreateAccDescS2TTW(accdesc); AddressDescriptor walkaddress; walkaddress.vaddress = ipa.vaddress; if HCR2.CD == '1' then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); integer msb_residual = iasize - 1; DescriptorType desctype; repeat fault.level = walkstate.level; constant integer indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; constant integer indexmsb = msb_residual; bits(40) index = ZeroExtend(ipa.paddress.address<indexmsb:indexlsb>:'000', 40); walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index, 56); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; (fault, descriptor) = FetchDescriptor(walkparams.ee, walkaddress, walkaccess, fault, 64); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); desctype = AArch32.DecodeDescriptorTypeLD(descriptor, walkstate.level); case desctype of when DescriptorType_Table if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); walkstate.baseaddress.address = ZeroExtend(descriptor<39:12>:Zeros(12), 56); walkstate.level = walkstate.level + 1; msb_residual = indexlsb - 1; when DescriptorType_Invalid fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when DescriptorType_Leaf walkstate.istable = FALSE; until desctype IN {DescriptorType_Leaf}; // Check the output address is inside the supported range if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); // Check the access flag if descriptor<10> == '0' then fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); // Unpack the descriptor into address and upper and lower block attributes constant integer indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; walkstate.baseaddress.address = ZeroExtend(descriptor<39:indexlsb>:Zeros(indexlsb), 56); walkstate.permissions.s2ap = descriptor<7:6>; walkstate.permissions.s2xn = descriptor<54>; if IsFeatureImplemented(FEAT_XNX) then walkstate.permissions.s2xnx = descriptor<53>; else walkstate.permissions.s2xnx = '0'; memattr = descriptor<5:2>; sh = 
descriptor<9:8>; s2aarch64 = FALSE; walkstate.memattrs = S2DecodeMemAttrs(memattr, sh, s2aarch64); walkstate.contiguous = descriptor<52>; return (fault, walkstate);
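When the stage 2 walk starts above the final level, the initial table can be larger than one granule (a concatenated table). The following hypothetical worked example evaluates the baselsb expression used in AArch32.S2Walk() for a 40-bit IPA starting at level 1:

#include <stdio.h>

#define FINAL_LEVEL 3

int main(void) {
    int iasize = 40;                      // AArch32.S2IASize(T0SZ = -8)
    int startlevel = 1;                   // AArch32.S2StartLevel(SL0 = 1)
    int levels = FINAL_LEVEL - startlevel;
    int granulebits = 12;
    int stride = granulebits - 3;

    int baselsb = (iasize - (levels * stride + granulebits)) + 3;
    printf("baselsb=%d -> initial table is %d bytes\n", baselsb, 1 << baselsb);
    return 0;
}

This yields baselsb = 13: the level 1 table must resolve 10 index bits, so it holds 1024 descriptors of 8 bytes each, an 8KB table aligned to VTTBR<39:13>.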
// AArch32.TranslationSizeSD() // =========================== // Determine the size of the translation integer AArch32.TranslationSizeSD(SDFType sdftype) integer tsize; case sdftype of when SDFType_SmallPage tsize = 12; when SDFType_LargePage tsize = 16; when SDFType_Section tsize = 20; when SDFType_Supersection tsize = 24; return tsize;
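Each SDFType maps to a power-of-two region of 2^tsize bytes. A small illustrative table in C:

#include <stdio.h>

int main(void) {
    struct { const char *name; int tsize; } m[] = {
        { "SmallPage",    12 },  // 4KB
        { "LargePage",    16 },  // 64KB
        { "Section",      20 },  // 1MB
        { "Supersection", 24 },  // 16MB
    };
    for (int i = 0; i < 4; i++)
        printf("%-12s 2^%d = %lu bytes\n", m[i].name, m[i].tsize,
               1UL << m[i].tsize);
    return 0;
}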
// RemapRegsHaveResetValues() // ========================== boolean RemapRegsHaveResetValues();
// AArch32.GetS1TTWParams() // ======================== // Returns stage 1 translation table walk parameters from respective controlling // System registers. S1TTWParams AArch32.GetS1TTWParams(Regime regime, bits(32) va) S1TTWParams walkparams; case regime of when Regime_EL2 walkparams = AArch32.S1TTWParamsEL2(); when Regime_EL10 walkparams = AArch32.S1TTWParamsEL10(va); when Regime_EL30 walkparams = AArch32.S1TTWParamsEL30(va); return walkparams;
// AArch32.GetS2TTWParams() // ======================== // Gather walk parameters for stage 2 translation S2TTWParams AArch32.GetS2TTWParams() S2TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.s = VTCR.S; walkparams.t0sz = VTCR.T0SZ; walkparams.sl0 = VTCR.SL0; walkparams.irgn = VTCR.IRGN0; walkparams.orgn = VTCR.ORGN0; walkparams.sh = VTCR.SH0; walkparams.ee = HSCTLR.EE; walkparams.ptw = HCR.PTW; walkparams.vm = HCR.VM OR HCR.DC; // VTCR.S must match VTCR.T0SZ[3] if walkparams.s != walkparams.t0sz<3> then (-, walkparams.t0sz) = ConstrainUnpredictableBits(Unpredictable_RESVTCRS, 4); return walkparams;
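The check at the end of AArch32.GetS2TTWParams() requires VTCR.S to equal VTCR.T0SZ[3]; a mismatch is CONSTRAINED UNPREDICTABLE and the model picks a consistent replacement. A minimal C sketch of that consistency test (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool vtcr_s_consistent(unsigned s, unsigned t0sz4) {
    return (s & 1u) == ((t0sz4 >> 3) & 1u);   // VTCR.S must equal VTCR.T0SZ[3]
}

int main(void) {
    printf("S=1, T0SZ=0b1000: %s\n",
           vtcr_s_consistent(1, 0x8) ? "consistent" : "inconsistent");
    printf("S=0, T0SZ=0b1000: %s\n",
           vtcr_s_consistent(0, 0x8) ? "consistent" : "inconsistent");
    return 0;
}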
// AArch32.GetVARange() // ==================== // Determine whether the VA lies in the lower or upper address range for stage 1 long-descriptor walks VARange AArch32.GetVARange(bits(32) va, bits(3) t0sz, bits(3) t1sz) // Lower range Input Address size constant integer lo_iasize = AArch32.S1IASize(t0sz); // Upper range Input Address size constant integer up_iasize = AArch32.S1IASize(t1sz); if t1sz == '000' && t0sz == '000' then return VARange_LOWER; elsif t1sz == '000' then return if IsZero(va<31:lo_iasize>) then VARange_LOWER else VARange_UPPER; elsif t0sz == '000' then return if IsOnes(va<31:up_iasize>) then VARange_UPPER else VARange_LOWER; elsif IsZero(va<31:lo_iasize>) then return VARange_LOWER; elsif IsOnes(va<31:up_iasize>) then return VARange_UPPER; else // Will be reported as a Translation Fault return VARange UNKNOWN;
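A free-standing C model of the range-selection logic above, using shifts in place of the IsZero/IsOnes bit-slice tests (illustrative, not specification text):

#include <stdint.h>
#include <stdio.h>

typedef enum { VARANGE_LOWER, VARANGE_UPPER, VARANGE_FAULT } varange_t;

static varange_t get_varange(uint32_t va, int t0sz, int t1sz) {
    int lo_iasize = 32 - t0sz;             // AArch32.S1IASize(t0sz)
    int up_iasize = 32 - t1sz;             // AArch32.S1IASize(t1sz)
    if (t0sz == 0 && t1sz == 0)
        return VARANGE_LOWER;
    if (t1sz == 0)                         // only the lower size is constrained
        return (va >> lo_iasize) == 0 ? VARANGE_LOWER : VARANGE_UPPER;
    if (t0sz == 0)                         // only the upper size is constrained
        return (va >> up_iasize) == (0xFFFFFFFFu >> up_iasize)
                   ? VARANGE_UPPER : VARANGE_LOWER;
    if ((va >> lo_iasize) == 0)            // IsZero(va<31:lo_iasize>)
        return VARANGE_LOWER;
    if ((va >> up_iasize) == (0xFFFFFFFFu >> up_iasize))
        return VARANGE_UPPER;              // IsOnes(va<31:up_iasize>)
    return VARANGE_FAULT;                  // reported as a Translation fault
}

int main(void) {
    printf("%d\n", get_varange(0x00001000u, 2, 2));  // 0: lower range
    printf("%d\n", get_varange(0xC0001000u, 2, 2));  // 1: upper range
    printf("%d\n", get_varange(0x80001000u, 2, 2));  // 2: neither, faults
    return 0;
}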
// AArch32.S1DCacheEnabled() // ========================= // Determine cacheability of stage 1 data accesses boolean AArch32.S1DCacheEnabled(Regime regime) case regime of when Regime_EL30 return SCTLR_S.C == '1'; when Regime_EL2 return HSCTLR.C == '1'; when Regime_EL10 return (if HaveAArch32EL(EL3) then SCTLR_NS.C else SCTLR.C) == '1';
// AArch32.S1ICacheEnabled() // ========================= // Determine cacheability of stage 1 instruction fetches boolean AArch32.S1ICacheEnabled(Regime regime) case regime of when Regime_EL30 return SCTLR_S.I == '1'; when Regime_EL2 return HSCTLR.I == '1'; when Regime_EL10 return (if HaveAArch32EL(EL3) then SCTLR_NS.I else SCTLR.I) == '1';
// AArch32.S1TTWParamsEL10() // ========================= // Gather stage 1 translation table walk parameters for EL1&0 regime // (with EL2 enabled or disabled). S1TTWParams AArch32.S1TTWParamsEL10(bits(32) va) bits(64) mair; bit sif; TTBCR_Type ttbcr; TTBCR2_Type ttbcr2; SCTLR_Type sctlr; if ELUsingAArch32(EL3) then ttbcr = TTBCR_NS; ttbcr2 = TTBCR2_NS; sctlr = SCTLR_NS; mair = MAIR1_NS:MAIR0_NS; sif = SCR.SIF; else ttbcr = TTBCR; ttbcr2 = TTBCR2; sctlr = SCTLR; mair = MAIR1:MAIR0; sif = if HaveEL(EL3) then SCR_EL3.SIF else '0'; assert ttbcr.EAE == '1'; S1TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.t0sz = ttbcr.T0SZ; walkparams.t1sz = ttbcr.T1SZ; walkparams.ee = sctlr.EE; walkparams.wxn = sctlr.WXN; walkparams.uwxn = sctlr.UWXN; walkparams.ntlsmd = if IsFeatureImplemented(FEAT_LSMAOC) then sctlr.nTLSMD else '1'; walkparams.mair = mair; walkparams.sif = sif; varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); if varange == VARange_LOWER then walkparams.sh = ttbcr.SH0; walkparams.irgn = ttbcr.IRGN0; walkparams.orgn = ttbcr.ORGN0; walkparams.hpd = (if IsFeatureImplemented(FEAT_AA32HPD) then ttbcr.T2E AND ttbcr2.HPD0 else '0'); else walkparams.sh = ttbcr.SH1; walkparams.irgn = ttbcr.IRGN1; walkparams.orgn = ttbcr.ORGN1; walkparams.hpd = (if IsFeatureImplemented(FEAT_AA32HPD) then ttbcr.T2E AND ttbcr2.HPD1 else '0'); return walkparams;
// AArch32.S1TTWParamsEL2() // ======================== // Gather stage 1 translation table walk parameters for EL2 regime S1TTWParams AArch32.S1TTWParamsEL2() S1TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.t0sz = HTCR.T0SZ; walkparams.irgn = HTCR.IRGN0; walkparams.orgn = HTCR.ORGN0; walkparams.sh = HTCR.SH0; walkparams.hpd = if IsFeatureImplemented(FEAT_AA32HPD) then HTCR.HPD else '0'; walkparams.ee = HSCTLR.EE; walkparams.wxn = HSCTLR.WXN; if IsFeatureImplemented(FEAT_LSMAOC) then walkparams.ntlsmd = HSCTLR.nTLSMD; else walkparams.ntlsmd = '1'; walkparams.mair = HMAIR1:HMAIR0; return walkparams;
// AArch32.S1TTWParamsEL30() // ========================= // Gather stage 1 translation table walk parameters for EL3&0 regime S1TTWParams AArch32.S1TTWParamsEL30(bits(32) va) assert TTBCR_S.EAE == '1'; S1TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.t0sz = TTBCR_S.T0SZ; walkparams.t1sz = TTBCR_S.T1SZ; walkparams.ee = SCTLR_S.EE; walkparams.wxn = SCTLR_S.WXN; walkparams.uwxn = SCTLR_S.UWXN; walkparams.ntlsmd = if IsFeatureImplemented(FEAT_LSMAOC) then SCTLR_S.nTLSMD else '1'; walkparams.mair = MAIR1_S:MAIR0_S; walkparams.sif = SCR.SIF; varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); if varange == VARange_LOWER then walkparams.sh = TTBCR_S.SH0; walkparams.irgn = TTBCR_S.IRGN0; walkparams.orgn = TTBCR_S.ORGN0; walkparams.hpd = (if IsFeatureImplemented(FEAT_AA32HPD) then TTBCR_S.T2E AND TTBCR2_S.HPD0 else '0'); else walkparams.sh = TTBCR_S.SH1; walkparams.irgn = TTBCR_S.IRGN1; walkparams.orgn = TTBCR_S.ORGN1; walkparams.hpd = (if IsFeatureImplemented(FEAT_AA32HPD) then TTBCR_S.T2E AND TTBCR2_S.HPD1 else '0'); return walkparams;
// BRBCycleCountingEnabled() // ========================= // Returns TRUE if the recording of cycle counts is allowed, // FALSE otherwise. boolean BRBCycleCountingEnabled() if HaveEL(EL2) && BRBCR_EL2.CC == '0' then return FALSE; if BRBCR_EL1.CC == '0' then return FALSE; return TRUE;
// BRBEBranch() // ============ // Called to write branch record for the following branches when BRB is active: // direct branches, // indirect branches, // direct branches with link, // indirect branches with link, // returns from subroutines. BRBEBranch(BranchType br_type, boolean cond, bits(64) target_address) if BranchRecordAllowed(PSTATE.EL) && FilterBranchRecord(br_type, cond) then bits(6) branch_type; case br_type of when BranchType_DIR branch_type = if cond then '001000' else '000000'; when BranchType_INDIR branch_type = '000001'; when BranchType_DIRCALL branch_type = '000010'; when BranchType_INDCALL branch_type = '000011'; when BranchType_RET branch_type = '000101'; otherwise Unreachable(); bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if IsFeatureImplemented(FEAT_TME) then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = if IsFeatureImplemented(FEAT_TME) && TSTATE.depth > 0 then '1' else '0'; bits(2) el = PSTATE.EL; bit mispredict = if BRBEMispredictAllowed() && BranchMispredict() then '1' else '0'; UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, '11', PC64, target_address); BRBFCR_EL1.LASTFAILED = '0'; PMUEvent(PMU_EVENT_BRB_FILTRATE); return;
// BRBEBranchOnISB() // ================= // Returns TRUE if ISBs generate Branch records, and FALSE otherwise. boolean BRBEBranchOnISB() return boolean IMPLEMENTATION_DEFINED "ISB generates Branch records";
// BRBEDebugStateExit() // ==================== // Called to write Debug state exit branch record when BRB is active. BRBEDebugStateExit(bits(64) target_address) if BranchRecordAllowed(PSTATE.EL) then // Debug state is a prohibited region, therefore ccu=1, cc=0, source_address=0 bits(6) branch_type = '111001'; bit ccu = '1'; bits(14) cc = Zeros(14); bit lastfailed = if IsFeatureImplemented(FEAT_TME) then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = '0'; bits(2) el = PSTATE.EL; bit mispredict = '0'; UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, '01', Zeros(64), target_address); BRBFCR_EL1.LASTFAILED = '0'; PMUEvent(PMU_EVENT_BRB_FILTRATE); return;
// BRBEException() // =============== // Called to write exception branch record when BRB is active. BRBEException(ExceptionRecord erec, boolean source_valid, bits(64) source_address_in, bits(64) target_address_in, bits(2) target_el, boolean trappedsyscallinst) bits(64) target_address = target_address_in; Exception except = erec.exceptype; bits(25) iss = erec.syndrome; case target_el of when EL3 if !IsFeatureImplemented(FEAT_BRBEv1p1) || (MDCR_EL3.E3BREC == MDCR_EL3.E3BREW) then return; when EL2 if BRBCR_EL2.EXCEPTION == '0' then return; when EL1 if BRBCR_EL1.EXCEPTION == '0' then return; boolean target_valid = BranchRecordAllowed(target_el); if source_valid || target_valid then bits(6) branch_type; case except of when Exception_Uncategorized branch_type = '100011'; // Trap when Exception_WFxTrap branch_type = '100011'; // Trap when Exception_CP15RTTrap branch_type = '100011'; // Trap when Exception_CP15RRTTrap branch_type = '100011'; // Trap when Exception_CP14RTTrap branch_type = '100011'; // Trap when Exception_CP14DTTrap branch_type = '100011'; // Trap when Exception_AdvSIMDFPAccessTrap branch_type = '100011'; // Trap when Exception_FPIDTrap branch_type = '100011'; // Trap when Exception_PACTrap branch_type = '100011'; // Trap when Exception_TSTARTAccessTrap branch_type = '100011'; // Trap when Exception_CP14RRTTrap branch_type = '100011'; // Trap when Exception_LDST64BTrap branch_type = '100011'; // Trap when Exception_BranchTarget branch_type = '101011'; // Inst Fault when Exception_IllegalState branch_type = '100011'; // Trap when Exception_SupervisorCall if !trappedsyscallinst then branch_type = '100010'; // Call else branch_type = '100011'; // Trap when Exception_HypervisorCall branch_type = '100010'; // Call when Exception_MonitorCall if !trappedsyscallinst then branch_type = '100010'; // Call else branch_type = '100011'; // Trap when Exception_SystemRegisterTrap branch_type = '100011'; // Trap when Exception_SystemRegister128Trap branch_type = '100011'; // Trap when Exception_SVEAccessTrap branch_type = '100011'; // Trap when Exception_SMEAccessTrap branch_type = '100011'; // Trap when Exception_ERetTrap branch_type = '100011'; // Trap when Exception_PACFail branch_type = '101100'; // Data Fault when Exception_InstructionAbort branch_type = '101011'; // Inst Fault when Exception_PCAlignment branch_type = '101010'; // Alignment when Exception_DataAbort branch_type = '101100'; // Data Fault when Exception_NV2DataAbort branch_type = '101100'; // Data Fault when Exception_SPAlignment branch_type = '101010'; // Alignment when Exception_FPTrappedException branch_type = '100011'; // Trap when Exception_SError branch_type = '100100'; // System Error when Exception_Breakpoint branch_type = '100110'; // Inst debug when Exception_SoftwareStep branch_type = '100110'; // Inst debug when Exception_Watchpoint branch_type = '100111'; // Data debug when Exception_NV2Watchpoint branch_type = '100111'; // Data debug when Exception_SoftwareBreakpoint branch_type = '100110'; // Inst debug when Exception_IRQ branch_type = '101110'; // IRQ when Exception_FIQ branch_type = '101111'; // FIQ when Exception_MemCpyMemSet branch_type = '100011'; // Trap when Exception_GCSFail if iss<23:20> == '0000' then branch_type = '101100'; // Data Fault elsif iss<23:20> == '0001' then branch_type = '101011'; // Inst Fault elsif iss<23:20> == '0010' then branch_type = '100011'; // Trap else Unreachable(); when Exception_PMU branch_type = '100110'; // Inst debug; when Exception_GPC if iss<20> == '1' then branch_type = 
'101011'; // Inst fault else branch_type = '101100'; // Data fault otherwise Unreachable(); bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if IsFeatureImplemented(FEAT_TME) then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = (if source_valid && IsFeatureImplemented(FEAT_TME) && TSTATE.depth > 0 then '1' else '0'); bits(2) el = if target_valid then target_el else '00'; bit mispredict = '0'; bit sv = if source_valid then '1' else '0'; bit tv = if target_valid then '1' else '0'; bits(64) source_address = if source_valid then source_address_in else Zeros(64); if !target_valid then target_address = Zeros(64); else target_address = AArch64.BranchAddr(target_address, target_el); UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, sv:tv, source_address, target_address); BRBFCR_EL1.LASTFAILED = '0'; PMUEvent(PMU_EVENT_BRB_FILTRATE); return;
// BRBEExceptionReturn() // ===================== // Called to write exception return branch record when BRB is active. BRBEExceptionReturn(bits(64) target_address_in, bits(2) source_el, boolean source_valid, bits(64) source_address_in) bits(64) target_address = target_address_in; case source_el of when EL3 if !IsFeatureImplemented(FEAT_BRBEv1p1) || (MDCR_EL3.E3BREC == MDCR_EL3.E3BREW) then return; when EL2 if BRBCR_EL2.ERTN == '0' then return; when EL1 if BRBCR_EL1.ERTN == '0' then return; boolean target_valid = BranchRecordAllowed(PSTATE.EL); if source_valid || target_valid then bits(6) branch_type = '000111'; bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if IsFeatureImplemented(FEAT_TME) then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = (if source_valid && IsFeatureImplemented(FEAT_TME) && TSTATE.depth > 0 then '1' else '0'); bits(2) el = if target_valid then PSTATE.EL else '00'; bit mispredict = if (source_valid && BRBEMispredictAllowed() && BranchMispredict()) then '1' else '0'; bit sv = if source_valid then '1' else '0'; bit tv = if target_valid then '1' else '0'; bits(64) source_address = if source_valid then source_address_in else Zeros(64); if !target_valid then target_address = Zeros(64); UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, sv:tv, source_address, target_address); BRBFCR_EL1.LASTFAILED = '0'; PMUEvent(PMU_EVENT_BRB_FILTRATE); return;
// BRBEFreeze() // ============ // Generates BRBE freeze event. BRBEFreeze() BRBFCR_EL1.PAUSED = '1'; BRBTS_EL1 = GetTimestamp(BRBETimeStamp());
// BRBEISB() // ========= // Handles ISB instruction for BRBE. BRBEISB() boolean branch_conditional = FALSE; BRBEBranch(BranchType_DIR, branch_conditional, PC64 + 4);
// BRBEMispredictAllowed() // ======================= // Returns TRUE if the recording of branch misprediction is allowed, // FALSE otherwise. boolean BRBEMispredictAllowed() if HaveEL(EL2) && BRBCR_EL2.MPRED == '0' then return FALSE; if BRBCR_EL1.MPRED == '0' then return FALSE; return TRUE;
// BRBETimeStamp() // =============== // Returns captured timestamp. TimeStamp BRBETimeStamp() if HaveEL(EL2) then TS_el2 = BRBCR_EL2.TS; if !IsFeatureImplemented(FEAT_ECV) && TS_el2 == '10' then // Reserved value (-, TS_el2) = ConstrainUnpredictableBits(Unpredictable_EL2TIMESTAMP, 2); case TS_el2 of when '00' // Falls out to check BRBCR_EL1.TS when '01' return TimeStamp_Virtual; when '10' assert IsFeatureImplemented(FEAT_ECV); // Otherwise ConstrainUnpredictableBits // removes this case return TimeStamp_OffsetPhysical; when '11' return TimeStamp_Physical; TS_el1 = BRBCR_EL1.TS; if TS_el1 == '00' || (!IsFeatureImplemented(FEAT_ECV) && TS_el1 == '10') then // Reserved value (-, TS_el1) = ConstrainUnpredictableBits(Unpredictable_EL1TIMESTAMP, 2); case TS_el1 of when '01' return TimeStamp_Virtual; when '10' return TimeStamp_OffsetPhysical; when '11' return TimeStamp_Physical; otherwise Unreachable(); // ConstrainUnpredictableBits removes this case
// BRB_IALL() // ========== // Called to perform invalidation of branch records BRB_IALL() for i = 0 to GetBRBENumRecords() - 1 Records_SRC[i] = Zeros(64); Records_TGT[i] = Zeros(64); Records_INF[i] = Zeros(64);
// BRB_INJ() // ========= // Called to perform manual injection of branch records. BRB_INJ() UpdateBranchRecordBuffer(BRBINFINJ_EL1.CCU, BRBINFINJ_EL1.CC, BRBINFINJ_EL1.LASTFAILED, BRBINFINJ_EL1.T, BRBINFINJ_EL1.TYPE, BRBINFINJ_EL1.EL, BRBINFINJ_EL1.MPRED, BRBINFINJ_EL1.VALID, BRBSRCINJ_EL1.ADDRESS, BRBTGTINJ_EL1.ADDRESS); BRBINFINJ_EL1 = bits(64) UNKNOWN; BRBSRCINJ_EL1 = bits(64) UNKNOWN; BRBTGTINJ_EL1 = bits(64) UNKNOWN; if ConstrainUnpredictableBool(Unpredictable_BRBFILTRATE) then PMUEvent(PMU_EVENT_BRB_FILTRATE);
// BranchEncCycleCount() // ===================== // The first return result is '1' if either of the following is true, and '0' otherwise: // - This is the first Branch record after the PE exited a Prohibited Region. // - This is the first Branch record after cycle counting has been enabled. // If the first return result is '0', the second return result is the encoded cycle count // since the last branch. // The format of this field uses a mantissa and exponent to express the cycle count value. // - bits[7:0] indicate the mantissa M. // - bits[13:8] indicate the exponent E. // The cycle count is expressed using the following function: // cycle_count = (if IsZero(E) then UInt(M) else UInt('1':M:Zeros(UInt(E)-1))) // A value of all ones in both the mantissa and exponent indicates the cycle count value // exceeded the size of the cycle counter. // If the cycle count is not known, the second return result is zero. (bit, bits(14)) BranchEncCycleCount();
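A short C sketch decoding the mantissa/exponent format described above; decode_cc is a hypothetical helper, not part of the specification:

#include <stdint.h>
#include <stdio.h>

static uint64_t decode_cc(uint16_t cc14) {
    uint32_t m = cc14 & 0xFFu;            // mantissa M, bits[7:0]
    uint32_t e = (cc14 >> 8) & 0x3Fu;     // exponent E, bits[13:8]
    if (e == 0)
        return m;                         // UInt(M)
    return (uint64_t)(0x100u | m) << (e - 1);  // UInt('1':M:Zeros(E-1))
}

int main(void) {
    printf("E=0, M=200  -> %llu cycles\n", (unsigned long long)decode_cc(200));
    printf("E=3, M=0x40 -> %llu cycles\n",
           (unsigned long long)decode_cc((3u << 8) | 0x40u));
    return 0;
}

The second example decodes '1':0x40 shifted left by 2, i.e. 1280 cycles.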
// BranchMispredict() // ================== // Returns TRUE if the branch being executed was mispredicted, FALSE otherwise. boolean BranchMispredict();
// BranchRawCycleCount() // ===================== // If the cycle count is known, the return result is the cycle count since the last branch. integer BranchRawCycleCount();
// BranchRecordAllowed() // ===================== // Returns TRUE if branch recording is allowed, FALSE otherwise. boolean BranchRecordAllowed(bits(2) el) if ELUsingAArch32(el) then return FALSE; if BRBFCR_EL1.PAUSED == '1' then return FALSE; if el == EL3 && IsFeatureImplemented(FEAT_BRBEv1p1) then return (MDCR_EL3.E3BREC != MDCR_EL3.E3BREW); if HaveEL(EL3) && (MDCR_EL3.SBRBE == '00' || (CurrentSecurityState() == SS_Secure && MDCR_EL3.SBRBE == '01')) then return FALSE; case el of when EL3 return FALSE; // FEAT_BRBEv1p1 not implemented when EL2 return BRBCR_EL2.E2BRE == '1'; when EL1 return BRBCR_EL1.E1BRE == '1'; when EL0 if EL2Enabled() && HCR_EL2.TGE == '1' then return BRBCR_EL2.E0HBRE == '1'; else return BRBCR_EL1.E0BRE == '1';
// Contents of the Branch Record Buffer // ===================================== array [0..63] of BRBSRC_EL1_Type Records_SRC; array [0..63] of BRBTGT_EL1_Type Records_TGT; array [0..63] of BRBINF_EL1_Type Records_INF;
// FilterBranchRecord() // ==================== // Returns TRUE if the branch record is not filtered out, FALSE otherwise. boolean FilterBranchRecord(BranchType br, boolean cond) case br of when BranchType_DIRCALL return BRBFCR_EL1.DIRCALL != BRBFCR_EL1.EnI; when BranchType_INDCALL return BRBFCR_EL1.INDCALL != BRBFCR_EL1.EnI; when BranchType_RET return BRBFCR_EL1.RTN != BRBFCR_EL1.EnI; when BranchType_DIR if cond then return BRBFCR_EL1.CONDDIR != BRBFCR_EL1.EnI; else return BRBFCR_EL1.DIRECT != BRBFCR_EL1.EnI; when BranchType_INDIR return BRBFCR_EL1.INDIRECT != BRBFCR_EL1.EnI; otherwise Unreachable(); return FALSE;
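Each comparison in FilterBranchRecord() tests a branch-type enable bit against BRBFCR_EL1.EnI, so EnI flips the filter from include-selected to exclude-selected types. A minimal illustration in C (helper name hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool keep_record(bool type_enable_bit, bool eni) {
    return type_enable_bit != eni;        // record kept when the bits differ
}

int main(void) {
    printf("DIRCALL=1, EnI=0 -> %s\n", keep_record(true, false) ? "record" : "drop");
    printf("DIRCALL=1, EnI=1 -> %s\n", keep_record(true, true)  ? "record" : "drop");
    return 0;
}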
// FirstBranchAfterProhibited() // ============================ // Returns TRUE if the branch recorded is the first branch after a prohibited region, // FALSE otherwise. boolean FirstBranchAfterProhibited();
// GetBRBENumRecords() // =================== // Returns the number of branch records implemented. integer GetBRBENumRecords() assert UInt(BRBIDR0_EL1.NUMREC) IN {0x08, 0x10, 0x20, 0x40}; return integer IMPLEMENTATION_DEFINED "Number of BRB records";
// Getter functions for branch records // =================================== // Functions used by MRS instructions that access branch records BRBSRC_EL1_Type BRBSRC_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_SRC[record]; else return Zeros(64); BRBTGT_EL1_Type BRBTGT_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_TGT[record]; else return Zeros(64); BRBINF_EL1_Type BRBINF_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_INF[record]; else return Zeros(64);
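The getters above map a 32-entry register window onto the implemented records through BRBFCR_EL1.BANK: the physical record number is UInt(BANK : n<4:0>), and records at or beyond the implemented count read as zero. A hypothetical C model of that indexing:

#include <stdio.h>

static int record_index(unsigned bank, unsigned n) {
    return (int)(((bank & 0x3u) << 5) | (n & 0x1Fu));  // UInt(BANK : n<4:0>)
}

int main(void) {
    int numrec = 40;                     // e.g. a BRBIDR0_EL1.NUMREC value
    int r = record_index(1, 13);         // bank 1, accessor index 13
    printf("record %d %s\n", r,
           r < numrec ? "is implemented" : "reads as zero");
    return 0;
}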
// ShouldBRBEFreeze() // ================== // Returns TRUE if the BRBE freeze event conditions have been met, and FALSE otherwise. boolean ShouldBRBEFreeze() if !BranchRecordAllowed(PSTATE.EL) then return FALSE; boolean check_e = FALSE; boolean check_cnten = FALSE; boolean check_inten = FALSE; boolean exclude_sync = IsFeatureImplemented(FEAT_SEBEP); boolean exclude_cyc = TRUE; boolean include_lo; boolean include_hi; if HaveEL(EL2) then include_lo = (BRBCR_EL1.FZP == '1'); include_hi = (BRBCR_EL2.FZP == '1'); else include_lo = TRUE; include_hi = TRUE; return PMUOverflowCondition(check_e, check_cnten, check_inten, include_hi, include_lo, exclude_cyc, exclude_sync);
// UpdateBranchRecordBuffer() // ========================== // Add a new Branch record to the buffer. UpdateBranchRecordBuffer(bit ccu, bits(14) cc, bit lastfailed, bit transactional, bits(6) branch_type, bits(2) el, bit mispredict, bits(2) valid, bits(64) source_address, bits(64) target_address) // Shift the Branch Records in the buffer for i = GetBRBENumRecords() - 1 downto 1 Records_SRC[i] = Records_SRC[i - 1]; Records_TGT[i] = Records_TGT[i - 1]; Records_INF[i] = Records_INF[i - 1]; Records_INF[0].CCU = ccu; Records_INF[0].CC = cc; Records_INF[0].EL = el; Records_INF[0].VALID = valid; Records_INF[0].T = transactional; Records_INF[0].LASTFAILED = lastfailed; Records_INF[0].MPRED = mispredict; Records_INF[0].TYPE = branch_type; Records_SRC[0] = source_address; Records_TGT[0] = target_address; return;
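The buffer update behaves as a shift-in FIFO with the newest record at index 0. A minimal sketch, assuming a fixed NUMREC and plain arrays in place of the Records_* system arrays:

#include <stdint.h>
#include <stdio.h>

#define NUMREC 8                          // stand-in for GetBRBENumRecords()

static uint64_t src[NUMREC], tgt[NUMREC], inf[NUMREC];

static void push_record(uint64_t s, uint64_t t, uint64_t i) {
    for (int k = NUMREC - 1; k >= 1; k--) {   // shift older records down
        src[k] = src[k - 1];
        tgt[k] = tgt[k - 1];
        inf[k] = inf[k - 1];
    }
    src[0] = s; tgt[0] = t; inf[0] = i;       // newest record lands at index 0
}

int main(void) {
    push_record(0x1000, 0x2000, 1);
    push_record(0x3000, 0x4000, 2);
    printf("newest src=0x%llx, previous src=0x%llx\n",
           (unsigned long long)src[0], (unsigned long long)src[1]);
    return 0;
}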
// AArch64.BreakpointMatch() // ========================= // Breakpoint matching in an AArch64 translation regime. // Returns breakpoint type and a boolean to indicate the type of breakpoint and whether // the breakpoint is active and matched successfully. For Address Mismatch breakpoints, // the returned boolean is the inverted result. (BreakpointType, boolean) AArch64.BreakpointMatch(integer n, bits(64) vaddress, AccessDescriptor accdesc, integer size) assert !ELUsingAArch32(S1TranslationRegime()); assert n < NumBreakpointsImplemented(); linking_enabled = (DBGBCR_EL1[n].BT IN {'0x11', '1xx1'} || (IsFeatureImplemented(FEAT_ABLE) && DBGBCR_EL1[n].BT2 == '1')); // A breakpoint that has linking enabled does not generate debug events in isolation if linking_enabled then return (BreakpointType_Inactive, FALSE); enabled = IsBreakpointEnabled(n); linked = DBGBCR_EL1[n].BT IN {'0x01'}; isbreakpnt = TRUE; linked_to = FALSE; from_linking_enabled = FALSE; lbnx = if IsFeatureImplemented(FEAT_Debugv8p9) then DBGBCR_EL1[n].LBNX else '00'; linked_n = UInt(lbnx : DBGBCR_EL1[n].LBN); ssce = if IsFeatureImplemented(FEAT_RME) then DBGBCR_EL1[n].SSCE else '0'; state_match = AArch64.StateMatch(DBGBCR_EL1[n].SSC, ssce, DBGBCR_EL1[n].HMC, DBGBCR_EL1[n].PMC, linked, linked_n, isbreakpnt, vaddress, accdesc); (bp_type, value_match) = AArch64.BreakpointValueMatch(n, vaddress, linked_to, isbreakpnt, from_linking_enabled); if HaveAArch32() && size == 4 then // Check second halfword // If the breakpoint address and BAS of an Address breakpoint match the address of the // second halfword of an instruction, but not the address of the first halfword, it is // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug // event. (-, match_i) = AArch64.BreakpointValueMatch(n, vaddress + 2, linked_to, isbreakpnt, from_linking_enabled); if !value_match && match_i then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF); if vaddress<1> == '1' && DBGBCR_EL1[n].BAS == '1111' then // The above notwithstanding, if DBGBCR_EL1[n].BAS == '1111', then it is CONSTRAINED // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction // at the address DBGBVR_EL1[n]+2. if value_match then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF); if !(state_match && enabled) then return (BreakpointType_Inactive, FALSE); else return (bp_type, value_match);
// AArch64.BreakpointValueMatch() // ============================== // Returns breakpoint type to indicate the type of breakpoint and a boolean to indicate // whether the breakpoint matched successfully. For Address Mismatch breakpoints, the // returned boolean is the inverted result. If the breakpoint type return value is Inactive, // then the boolean result is FALSE. (BreakpointType, boolean) AArch64.BreakpointValueMatch(integer n_in, bits(64) vaddress, boolean linked_to, boolean isbreakpnt, boolean from_linking_enabled) // "n_in" is the identity of the breakpoint unit to match against. // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context // matching breakpoints. // "linked_to" is TRUE if this is a call from StateMatch for linking. // "isbreakpnt" is TRUE if this is a call from BreakpointMatch or from StateMatch for a // linked breakpoint or from BreakpointValueMatch for a linked breakpoint with linking enabled. // "from_linking_enabled" is TRUE if this is a call from BreakpointValueMatch for a linked // breakpoint with linking enabled. integer n = n_in; Constraint c; // For a non-existent breakpoint, it is CONSTRAINED UNPREDICTABLE whether this gives // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint. if n >= NumBreakpointsImplemented() then (c, n) = ConstrainUnpredictableInteger(0, NumBreakpointsImplemented() - 1, Unpredictable_BPNOTIMPL); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return (BreakpointType_Inactive, FALSE); // If this breakpoint is not enabled, it cannot generate a match. // (This could also happen on a call from StateMatch for linking). if !IsBreakpointEnabled(n) then return (BreakpointType_Inactive, FALSE); // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type. dbgtype = DBGBCR_EL1[n].BT; bt2 = if IsFeatureImplemented(FEAT_ABLE) then DBGBCR_EL1[n].BT2 else '0'; (c, bt2, dbgtype) = AArch64.ReservedBreakpointType(n, bt2, dbgtype); if c == Constraint_DISABLED then return (BreakpointType_Inactive, FALSE); // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value // Determine what to compare against. match_addr = (dbgtype IN {'0x0x'}); mismatch = (dbgtype IN {'010x'}); match_vmid = (dbgtype IN {'10xx'}); match_cid = (dbgtype IN {'001x'}); match_cid1 = (dbgtype IN {'101x', 'x11x'}); match_cid2 = (dbgtype IN {'11xx'}); linking_enabled = (dbgtype IN {'xx11', '1xx1'} || bt2 == '1'); // If this is a call from StateMatch, return FALSE if the breakpoint is not // programmed with linking enabled. if linked_to && !linking_enabled then return (BreakpointType_Inactive, FALSE); // If called from BreakpointMatch, return FALSE for a breakpoint with linking enabled. if !linked_to && linking_enabled then return (BreakpointType_Inactive, FALSE); boolean linked = (dbgtype IN {'0x01'}); if from_linking_enabled then // A breakpoint with linking enabled has called this function. assert linked_to && isbreakpnt; if linked then // A breakpoint with linking enabled is linked to a linked breakpoint. This is // architecturally UNPREDICTABLE, but treated as disabled in the pseudocode to // avoid potential recursion in BreakpointValueMatch(). return (BreakpointType_Inactive, FALSE); // If a linked breakpoint is linked to an address matching breakpoint, // the behavior is CONSTRAINED UNPREDICTABLE.
if linked_to && match_addr && isbreakpnt then if !ConstrainUnpredictableBool(Unpredictable_BPLINKEDADDRMATCH) then return (BreakpointType_Inactive, FALSE); // A breakpoint programmed for address mismatch does not match in AArch32 state. if mismatch && UsingAArch32() then return (BreakpointType_Inactive, FALSE); boolean bvr_match = FALSE; boolean bxvr_match = FALSE; BreakpointType bp_type; integer mask; if IsFeatureImplemented(FEAT_BWE) then mask = UInt(DBGBCR_EL1[n].MASK); // If the mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE. if mask IN {1, 2} then (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESBPMASK); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return (BreakpointType_Inactive, FALSE); // Disabled when Constraint_NONE mask = 0; // No masking // Otherwise the value returned by ConstrainUnpredictableBits must // be a not-reserved value. if mask != 0 then // When DBGBCR_EL1[n].MASK is a valid nonzero value, the behavior is // CONSTRAINED UNPREDICTABLE if any of the following are true: // - DBGBCR_EL1[n].<BT2,BT> is programmed for a Context matching breakpoint. // - DBGBCR_EL1[n].BAS is not '1111' and AArch32 is supported at EL0. if ((match_cid || match_cid1 || match_cid2) || (DBGBCR_EL1[n].BAS != '1111' && HaveAArch32())) then if !ConstrainUnpredictableBool(Unpredictable_BPMASK) then return (BreakpointType_Inactive, FALSE); else // A stand-alone mismatch of a single address is not supported. if mismatch then return (BreakpointType_Inactive, FALSE); else mask = 0; // Do the comparison. if match_addr then boolean byte_select_match; integer byte = UInt(vaddress<1:0>); if HaveAArch32() then // T32 instructions can be executed at EL0 in an AArch64 translation regime. assert byte IN {0,2}; // "vaddress" is halfword aligned byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1'); else assert byte == 0; // "vaddress" is word aligned byte_select_match = TRUE; // DBGBCR_EL1[n].BAS<byte> is RES1 // When FEAT_LVA3 is not implemented, if the DBGBVR_EL1[n].RESS field bits are not a // sign extension of the MSB of DBGBVR_EL1[n].VA, it is UNPREDICTABLE whether they // appear to be included in the match. // If 'vaddress' is outside of the current virtual address space, then the access // generates a Translation fault. constant integer dbgtop = DebugAddrTop(); boolean unpredictable_ress = (dbgtop < 55 && !IsOnes(DBGBVR_EL1[n]<63:dbgtop>) && !IsZero(DBGBVR_EL1[n]<63:dbgtop>) && ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS)); constant integer cmpmsb = if unpredictable_ress then 63 else dbgtop; constant integer cmplsb = if mask > 2 then mask else 2; bvr_match = ((vaddress<cmpmsb:cmplsb> == DBGBVR_EL1[n]<cmpmsb:cmplsb>) && byte_select_match); if mask > 2 then // If masked bits of DBGBVR_EL1[n] are not zero, the behavior // is CONSTRAINED UNPREDICTABLE. 
constant integer masktop = mask - 1; if bvr_match && !IsZero(DBGBVR_EL1[n]<masktop:2>) then bvr_match = ConstrainUnpredictableBool(Unpredictable_BPMASKEDBITS); elsif match_cid then if IsInHost() then bvr_match = (CONTEXTIDR_EL2<31:0> == DBGBVR_EL1[n]<31:0>); else bvr_match = (PSTATE.EL IN {EL0, EL1} && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>); elsif match_cid1 then bvr_match = (PSTATE.EL IN {EL0, EL1} && !IsInHost() && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>); if match_vmid then bits(16) vmid; bits(16) bvr_vmid; if !IsFeatureImplemented(FEAT_VMID16) || VTCR_EL2.VS == '0' then vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16); bvr_vmid = ZeroExtend(DBGBVR_EL1[n]<39:32>, 16); else vmid = VTTBR_EL2.VMID; bvr_vmid = DBGBVR_EL1[n]<47:32>; bxvr_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() && vmid == bvr_vmid); elsif match_cid2 then bxvr_match = (PSTATE.EL != EL3 && EL2Enabled() && DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2<31:0>); bvr_match_valid = (match_addr || match_cid || match_cid1); bxvr_match_valid = (match_vmid || match_cid2); value_match = (!bxvr_match_valid || bxvr_match) && (!bvr_match_valid || bvr_match); // A watchpoint might be linked to a linked address matching breakpoint with linking enabled, // which is in turn linked to a context matching breakpoint. if linked_to && linked then // If called from StateMatch and breakpoint is a linked breakpoint then it must be a // watchpoint that is linked to an address matching breakpoint which is linked to a // context matching breakpoint. assert !isbreakpnt && match_addr && IsFeatureImplemented(FEAT_ABLE); lbnx = if IsFeatureImplemented(FEAT_Debugv8p9) then DBGBCR_EL1[n].LBNX else '00'; linked_linked_n = UInt(lbnx : DBGBCR_EL1[n].LBN); boolean linked_value_match; linked_vaddress = bits(64) UNKNOWN; linked_linked_to = TRUE; linked_isbreakpnt = TRUE; linked_from_linking_enabled = TRUE; (bp_type, linked_value_match) = AArch64.BreakpointValueMatch(linked_linked_n, linked_vaddress, linked_linked_to, linked_isbreakpnt, linked_from_linking_enabled); value_match = value_match && linked_value_match; if match_addr && !mismatch then bp_type = BreakpointType_AddrMatch; elsif match_addr && mismatch then bp_type = BreakpointType_AddrMismatch; elsif match_vmid || match_cid || match_cid1 || match_cid2 then bp_type = BreakpointType_CtxtMatch; else Unreachable(); return (bp_type, value_match);
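The masked comparison in AArch64.BreakpointValueMatch() ignores address bits below MASK, so one address-match breakpoint can cover a naturally aligned 2^MASK-byte region. An illustrative C sketch of that compare (cmpmsb and mask values hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool bp_addr_match(uint64_t vaddress, uint64_t bvr, int cmpmsb, int mask) {
    int cmplsb = mask > 2 ? mask : 2;     // at minimum a word-aligned compare
    int width = cmpmsb - cmplsb + 1;
    uint64_t field = (width >= 64) ? ~0ULL : (((1ULL << width) - 1) << cmplsb);
    return (vaddress & field) == (bvr & field);
}

int main(void) {
    // MASK = 12: one breakpoint covers the 4KB region at 0x40001000
    printf("%d\n", bp_addr_match(0x40001ABCULL, 0x40001000ULL, 48, 12)); // 1
    printf("%d\n", bp_addr_match(0x40002000ULL, 0x40001000ULL, 48, 12)); // 0
    return 0;
}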
// AArch64.ReservedBreakpointType() // ================================ // Checks whether the given DBGBCR<n>_EL1.{BT2,BT} values are reserved and would // generate CONSTRAINED UNPREDICTABLE behavior; otherwise returns Constraint_NONE. (Constraint, bit, bits(4)) AArch64.ReservedBreakpointType(integer n, bit bt2_in, bits(4) bt_in) bit bt2 = bt2_in; bits(4) bt = bt_in; boolean reserved = FALSE; context_aware = IsContextAwareBreakpoint(n); if bt2 == '0' then // Context matching if !(bt IN {'0x0x'}) && !context_aware then reserved = TRUE; // EL2 extension if bt IN {'1xxx'} && !HaveEL(EL2) then reserved = TRUE; // Context matching if (bt IN {'011x','11xx'} && !IsFeatureImplemented(FEAT_VHE) && !IsFeatureImplemented(FEAT_Debugv8p2)) then reserved = TRUE; // Reserved if bt IN {'010x'} && !IsFeatureImplemented(FEAT_BWE) && !HaveAArch32EL(EL1) then reserved = TRUE; else // Reserved if !(bt IN {'0x0x'}) then reserved = TRUE; if reserved then Constraint c; (c, <bt2,bt>) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE, 5); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return (c, bit UNKNOWN, bits(4) UNKNOWN); // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value return (Constraint_NONE, bt2, bt);
// AArch64.StateMatch() // ==================== // Determine whether a breakpoint or watchpoint is enabled in the current mode and state. boolean AArch64.StateMatch(bits(2) ssc_in, bit ssce_in, bit hmc_in, bits(2) pxc_in, boolean linked_in, integer linked_n_in, boolean isbreakpnt, bits(64) vaddress, AccessDescriptor accdesc) if !IsFeatureImplemented(FEAT_RME) then assert ssce_in == '0'; // "ssc_in","ssce_in","hmc_in","pxc_in" are the control fields from // the DBGBCR_EL1[n] or DBGWCR_EL1[n] register. // "linked_in" is TRUE if this is a linked breakpoint/watchpoint type. // "linked_n_in" is the linked breakpoint number from the DBGBCR_EL1[n] or // DBGWCR_EL1[n] register. // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints. // "vaddress" is the program counter for a linked watchpoint or the same value passed to // AArch64.CheckBreakpoint for a linked breakpoint. // "accdesc" describes the properties of the access being matched. bits(2) ssc = ssc_in; bit ssce = ssce_in; bit hmc = hmc_in; bits(2) pxc = pxc_in; boolean linked = linked_in; integer linked_n = linked_n_in; // If parameters are set to a reserved type, behaves as either disabled or a defined type Constraint c; (c, ssc, ssce, hmc, pxc) = CheckValidStateMatch(ssc, ssce, hmc, pxc, isbreakpnt); if c == Constraint_DISABLED then return FALSE; // Otherwise the hmc,ssc,ssce,pxc values are either valid or the values returned by // CheckValidStateMatch are valid. EL3_match = HaveEL(EL3) && hmc == '1' && ssc<0> == '0'; EL2_match = HaveEL(EL2) && ((hmc == '1' && (ssc:pxc != '1000')) || ssc == '11'); EL1_match = pxc<0> == '1'; EL0_match = pxc<1> == '1'; boolean priv_match; case accdesc.el of when EL3 priv_match = EL3_match; when EL2 priv_match = EL2_match; when EL1 priv_match = EL1_match; when EL0 priv_match = EL0_match; // Security state match boolean ss_match; case ssce:ssc of when '000' ss_match = hmc == '1' || accdesc.ss != SS_Root; when '001' ss_match = accdesc.ss == SS_NonSecure; when '010' ss_match = (hmc == '1' && accdesc.ss == SS_Root) || accdesc.ss == SS_Secure; when '011' ss_match = (hmc == '1' && accdesc.ss != SS_Root) || accdesc.ss == SS_Secure; when '101' ss_match = accdesc.ss == SS_Realm; boolean linked_match = FALSE; if linked then // "linked_n" must be an enabled context-aware breakpoint unit. If it is not context-aware // then it is CONSTRAINED UNPREDICTABLE whether this gives no match, gives a match without // linking, or linked_n is mapped to some UNKNOWN breakpoint that is context-aware. if !IsContextAwareBreakpoint(linked_n) then (first_ctx_cmp, last_ctx_cmp) = ContextAwareBreakpointRange(); (c, linked_n) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return FALSE; // Disabled when Constraint_NONE linked = FALSE; // No linking // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint if linked then linked_to = TRUE; BreakpointType bp_type; from_linking_enabled = FALSE; (bp_type, linked_match) = AArch64.BreakpointValueMatch(linked_n, vaddress, linked_to, isbreakpnt, from_linking_enabled); if bp_type == BreakpointType_AddrMismatch then linked_match = !linked_match; return priv_match && ss_match && (!linked || linked_match);
// BreakpointType // ============== enumeration BreakpointType { BreakpointType_Inactive, // Breakpoint inactive or disabled BreakpointType_AddrMatch, // Address Match breakpoint BreakpointType_AddrMismatch, // Address Mismatch breakpoint BreakpointType_CtxtMatch }; // Context matching breakpoint
// DebugAddrTop() // ============== // Returns the value for the top bit used in Breakpoint and Watchpoint address comparisons. integer DebugAddrTop() if IsFeatureImplemented(FEAT_LVA3) then return 55; elsif IsFeatureImplemented(FEAT_LVA) then return 52; else return 48;