This page displays common pseudocode functions shared by many pages
// AArch32.AT()
// ============
// Perform address translation as per AT instructions.

AArch32.AT(bits(32) vaddress, TranslationStage stage, bits(2) el, ATAccess ataccess)
    SecurityState ss;

    // ATS1Hx instructions
    if el == EL2 then
        regime = Regime_EL2;
        eae = TRUE;
        ss = SS_NonSecure;
    // ATS1Cxx instructions
    elsif stage == TranslationStage_1 || (stage == TranslationStage_12 && !HaveEL(EL2)) then
        stage = TranslationStage_1;
        ss = SecurityStateAtEL(PSTATE.EL);
        regime = if ss == SS_Secure && ELUsingAArch32(EL3) then Regime_EL30 else Regime_EL10;
        eae = TTBCR.EAE == '1';
    // ATS12NSOxx instructions
    else
        regime = Regime_EL10;
        eae = if HaveAArch32EL(EL3) then TTBCR_NS.EAE == '1' else TTBCR.EAE == '1';
        ss = SS_NonSecure;

    AddressDescriptor addrdesc;
    aligned = TRUE;
    ispriv = el != EL0;
    supersection = '0';
    iswrite = ataccess IN {ATAccess_WritePAN, ATAccess_Write};
    acctype = if ataccess IN {ATAccess_Read, ATAccess_Write} then AccType_AT else AccType_ATPAN;

    // Prepare fault fields in case a fault is detected
    fault = NoFault();
    fault.acctype = acctype;
    fault.write = iswrite;

    if eae then
        (fault, addrdesc) = AArch32.S1TranslateLD(fault, regime, ss, vaddress, acctype, aligned,
                                                  iswrite, ispriv);
    else
        (fault, addrdesc, sdftype) = AArch32.S1TranslateSD(fault, regime, ss, vaddress, acctype,
                                                           aligned, iswrite, ispriv);
        supersection = if sdftype == SDFType_Supersection then '1' else '0';

    // ATS12NSOxx instructions
    if stage == TranslationStage_12 && fault.statuscode == Fault_None then
        s2fs1walk = FALSE;
        (fault, addrdesc) = AArch32.S2Translate(fault, addrdesc, ss, s2fs1walk, acctype, aligned,
                                                iswrite, ispriv);

    if fault.statuscode != Fault_None then
        // Take exception when External abort occurs on translation table walk
        if (IsExternalAbort(fault) ||
                (stage == TranslationStage_1 && el != EL2 && PSTATE.EL == EL1 &&
                 EL2Enabled() && fault.s2fs1walk)) then
            PAR = bits(64) UNKNOWN;
            AArch32.Abort(vaddress, fault);
        addrdesc.fault = fault;

    if (eae ||
            (stage == TranslationStage_12 && (HCR.VM == '1' || HCR.DC == '1')) ||
            (stage == TranslationStage_1 && el != EL2 && PSTATE.EL == EL2)) then
        AArch32.EncodePARLD(addrdesc, ss);
    else
        AArch32.EncodePARSD(addrdesc, supersection, ss);
    return;
// AArch32.EncodePARLD()
// =====================
// Returns 64-bit format PAR on address translation instruction.

AArch32.EncodePARLD(AddressDescriptor addrdesc, SecurityState ss)
    if !IsFault(addrdesc) then
        bit ns;
        if ss == SS_NonSecure then
            ns = bit UNKNOWN;
        elsif addrdesc.paddress.paspace == PAS_Secure then
            ns = '0';
        else
            ns = '1';

        PAR.F = '0';
        PAR.SH = ReportedPARShareability(PAREncodeShareability(addrdesc.memattrs));
        PAR.NS = ns;
        PAR<10> = bit IMPLEMENTATION_DEFINED "Non-Faulting PAR";    // IMPDEF
        PAR.LPAE = '1';
        PAR.PA = addrdesc.paddress.address<39:12>;
        PAR.ATTR = ReportedPARAttrs(EncodePARAttrs(addrdesc.memattrs));
    else
        PAR.F = '1';
        PAR.FST = AArch32.PARFaultStatusLD(addrdesc.fault);
        PAR.S2WLK = if addrdesc.fault.s2fs1walk then '1' else '0';
        PAR.FSTAGE = if addrdesc.fault.secondstage then '1' else '0';
        PAR.LPAE = '1';
        PAR<63:48> = bits(16) IMPLEMENTATION_DEFINED "Faulting PAR";    // IMPDEF
    return;
// AArch32.EncodePARSD()
// =====================
// Returns 32-bit format PAR on address translation instruction.

AArch32.EncodePARSD(AddressDescriptor addrdesc, bit supersection, SecurityState ss)
    if !IsFault(addrdesc) then
        if (addrdesc.memattrs.memtype == MemType_Device ||
              (addrdesc.memattrs.inner.attrs == MemAttr_NC &&
               addrdesc.memattrs.outer.attrs == MemAttr_NC)) then
            addrdesc.memattrs.shareability = Shareability_OSH;

        bit ns;
        if ss == SS_NonSecure then
            ns = bit UNKNOWN;
        elsif addrdesc.paddress.paspace == PAS_Secure then
            ns = '0';
        else
            ns = '1';

        bits(2) sh = if addrdesc.memattrs.shareability != Shareability_NSH then '01' else '00';

        PAR.F = '0';
        PAR.SS = supersection;
        PAR.Outer = AArch32.ReportedOuterAttrs(AArch32.PAROuterAttrs(addrdesc.memattrs));
        PAR.Inner = AArch32.ReportedInnerAttrs(AArch32.PARInnerAttrs(addrdesc.memattrs));
        PAR.SH = ReportedPARShareability(sh);
        PAR<8> = bit IMPLEMENTATION_DEFINED "Non-Faulting PAR";    // IMPDEF
        PAR.NS = ns;
        PAR.NOS = if addrdesc.memattrs.shareability == Shareability_OSH then '0' else '1';
        PAR.LPAE = '0';
        PAR.PA = addrdesc.paddress.address<39:12>;
    else
        PAR.F = '1';
        PAR.FST = AArch32.PARFaultStatusSD(addrdesc.fault);
        PAR.LPAE = '0';
        PAR<31:16> = bits(16) IMPLEMENTATION_DEFINED "Faulting PAR";    // IMPDEF
    return;
// AArch32.PARFaultStatusLD()
// ==========================
// Fault status field decoding of 64-bit PAR.

bits(6) AArch32.PARFaultStatusLD(FaultRecord fault)
    bits(32) syndrome;
    if fault.statuscode == Fault_Domain then
        // Report Domain fault
        assert fault.level IN {1,2};
        syndrome<1:0> = if fault.level == 1 then '01' else '10';
        syndrome<5:2> = '1111';
    else
        syndrome = AArch32.FaultStatusLD(TRUE, fault);
    return syndrome<5:0>;
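The Domain-fault encoding above can be cross-checked with a small host-side calculation. The following is a non-normative C sketch (the function name and types are illustrative, not part of the architecture) that builds the 6-bit fault status the same way:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: 64-bit-format PAR fault status for a Domain fault.
 * "level" is the lookup level (1 or 2); the encoding is '1111' : LL. */
static uint8_t par_fault_status_ld_domain(int level)
{
    assert(level == 1 || level == 2);
    uint8_t ll = (level == 1) ? 0x1 : 0x2;   /* syndrome<1:0> = '01' or '10' */
    return (uint8_t)((0xF << 2) | ll);       /* syndrome<5:2> = '1111' */
}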
// AArch32.PARFaultStatusSD()
// ==========================
// Fault status field decoding of 32-bit PAR.

bits(6) AArch32.PARFaultStatusSD(FaultRecord fault)
    bits(32) syndrome;
    syndrome = AArch32.FaultStatusSD(TRUE, fault);
    return syndrome<12,10,3:0>;
// AArch32.PARInnerAttrs()
// =======================
// Convert orthogonal attributes and hints to 32-bit PAR Inner field.

bits(3) AArch32.PARInnerAttrs(MemoryAttributes memattrs)
    bits(3) result;
    if memattrs.memtype == MemType_Device then
        if memattrs.device == DeviceType_nGnRnE then
            result = '001';    // Non-cacheable
        elsif memattrs.device == DeviceType_nGnRE then
            result = '011';    // Non-cacheable
    else
        MemAttrHints inner = memattrs.inner;
        if inner.attrs == MemAttr_NC then
            result = '000';    // Non-cacheable
        elsif inner.attrs == MemAttr_WB && inner.hints<0> == '1' then
            result = '101';    // Write-Back, Write-Allocate
        elsif inner.attrs == MemAttr_WT then
            result = '110';    // Write-Through
        elsif inner.attrs == MemAttr_WB && inner.hints<0> == '0' then
            result = '111';    // Write-Back, no Write-Allocate
    return result;
// AArch32.PAROuterAttrs()
// =======================
// Convert orthogonal attributes and hints to 32-bit PAR Outer field.

bits(2) AArch32.PAROuterAttrs(MemoryAttributes memattrs)
    bits(2) result;
    if memattrs.memtype == MemType_Device then
        result = bits(2) UNKNOWN;
    else
        MemAttrHints outer = memattrs.outer;
        if outer.attrs == MemAttr_NC then
            result = '00';    // Non-cacheable
        elsif outer.attrs == MemAttr_WB && outer.hints<0> == '1' then
            result = '01';    // Write-Back, Write-Allocate
        elsif outer.attrs == MemAttr_WT && outer.hints<0> == '0' then
            result = '10';    // Write-Through, no Write-Allocate
        elsif outer.attrs == MemAttr_WB && outer.hints<0> == '0' then
            result = '11';    // Write-Back, no Write-Allocate
    return result;
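For readers writing a PAR decoder, the Inner-field mapping above reduces to a small lookup. A non-normative C sketch of the Normal-memory cases, using local stand-in enums rather than any architectural API; the Outer field follows the same pattern with the two-bit encodings listed above:

#include <stdint.h>

/* Illustrative stand-ins for the pseudocode's MemAttrHints fields. */
enum mem_attr { ATTR_NC, ATTR_WT, ATTR_WB };

static uint8_t par_inner_attrs_normal(enum mem_attr attrs, int write_alloc_hint)
{
    switch (attrs) {
    case ATTR_NC: return 0x0;                          /* '000' Non-cacheable      */
    case ATTR_WT: return 0x6;                          /* '110' Write-Through      */
    case ATTR_WB: return write_alloc_hint ? 0x5 : 0x7; /* '101' WB+WA, '111' WB    */
    }
    return 0x0;
}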
// AArch32.DC()
// ============
// Perform Data Cache Operation.

AArch32.DC(bits(32) regval, CacheOp cacheop, CacheOpScope opscope)
    AccType acctype = AccType_DC;
    CacheRecord cache;

    cache.acctype = acctype;
    cache.cacheop = cacheop;
    cache.opscope = opscope;
    cache.cachetype = CacheType_Data;
    cache.security = SecurityStateAtEL(PSTATE.EL);

    if opscope == CacheOpScope_SetWay then
        cache.shareability = Shareability_NSH;
        (cache.set, cache.way, cache.level) = DecodeSW(ZeroExtend(regval), CacheType_Data);
        if (cacheop == CacheOp_Invalidate && PSTATE.EL == EL1 && EL2Enabled() &&
                ((!ELUsingAArch32(EL2) && HCR_EL2.SWIO == '1') ||
                 (ELUsingAArch32(EL2) && HCR.SWIO == '1') ||
                 (!ELUsingAArch32(EL2) && HCR_EL2.<DC,VM> != '00') ||
                 (ELUsingAArch32(EL2) && HCR.<DC,VM> != '00'))) then
            cache.cacheop = CacheOp_CleanInvalidate;
        CACHE_OP(cache);
        return;

    if EL2Enabled() then
        if PSTATE.EL IN {EL0, EL1} then
            cache.is_vmid_valid = TRUE;
            cache.vmid = VMID[];
        else
            cache.is_vmid_valid = FALSE;
    else
        cache.is_vmid_valid = FALSE;

    if PSTATE.EL == EL0 then
        cache.is_asid_valid = TRUE;
        cache.asid = ASID[];
    else
        cache.is_asid_valid = FALSE;

    need_translate = DCInstNeedsTranslation(opscope);
    iswrite = cacheop == CacheOp_Invalidate;
    vaddress = regval;
    size = 0;    // by default no watchpoint address
    if iswrite then
        size = integer IMPLEMENTATION_DEFINED "Data Cache Invalidate Watchpoint Size";
        assert size >= 4*(2^(UInt(CTR_EL0.DminLine))) && size <= 2048;
        assert (size<32:0> AND (size-1)<32:0>) == 0;    // size is power of 2
        vaddress = Align(regval, size);

    cache.translated = need_translate;
    cache.vaddress = ZeroExtend(vaddress);

    if need_translate then
        wasaligned = TRUE;
        memaddrdesc = AArch32.TranslateAddress(vaddress, acctype, iswrite, wasaligned, size);
        if IsFault(memaddrdesc) then
            AArch32.Abort(regval, memaddrdesc.fault);
        memattrs = memaddrdesc.memattrs;
        cache.paddress = memaddrdesc.paddress;
        if opscope == CacheOpScope_PoC then
            cache.shareability = memattrs.shareability;
        else
            cache.shareability = Shareability_NSH;
    else
        cache.shareability = Shareability UNKNOWN;
        cache.paddress = FullAddress UNKNOWN;

    if (cacheop == CacheOp_Invalidate && PSTATE.EL == EL1 && EL2Enabled() &&
            ((!ELUsingAArch32(EL2) && HCR_EL2.<DC,VM> != '00') ||
             (ELUsingAArch32(EL2) && HCR.<DC,VM> != '00'))) then
        cache.cacheop = CacheOp_CleanInvalidate;

    CACHE_OP(cache);
    return;
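The size checks in AArch32.DC() rely on the usual power-of-two identities. A non-normative C sketch of the same checks and the Align() step (names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: validate the IMPLEMENTATION DEFINED watchpoint size
 * and align the address down to it, as in the pseudocode above. */
static uint32_t align_for_dc_invalidate(uint32_t regval, uint32_t size)
{
    assert(size >= 4 && size <= 2048);
    assert((size & (size - 1)) == 0);    /* size is a power of 2 */
    return regval & ~(size - 1);         /* Align(regval, size)  */
}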
// AArch32.VCRMatch()
// ==================

boolean AArch32.VCRMatch(bits(32) vaddress)

    if UsingAArch32() && ELUsingAArch32(EL1) && PSTATE.EL != EL2 then
        // Each bit position in this string corresponds to a bit in DBGVCR and an exception vector.
        match_word = Zeros(32);

        if vaddress<31:5> == ExcVectorBase()<31:5> then
            if HaveEL(EL3) && !IsSecure() then
                match_word<UInt(vaddress<4:2>) + 24> = '1';    // Non-secure vectors
            else
                match_word<UInt(vaddress<4:2>) + 0> = '1';     // Secure vectors (or no EL3)

        if HaveEL(EL3) && ELUsingAArch32(EL3) && IsSecure() && vaddress<31:5> == MVBAR<31:5> then
            match_word<UInt(vaddress<4:2>) + 8> = '1';         // Monitor vectors

        // Mask out bits not corresponding to vectors.
        if !HaveEL(EL3) then
            mask = '00000000':'00000000':'00000000':'11011110';    // DBGVCR[31:8] are RES0
        elsif !ELUsingAArch32(EL3) then
            mask = '11011110':'00000000':'00000000':'11011110';    // DBGVCR[15:8] are RES0
        else
            mask = '11011110':'00000000':'11011100':'11011110';

        match_word = match_word AND DBGVCR AND mask;
        match = !IsZero(match_word);

        // Check for UNPREDICTABLE case - match on Prefetch Abort and Data Abort vectors
        if !IsZero(match_word<28:27,12:11,4:3>) && DebugTarget() == PSTATE.EL then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHDAPA);

        if !IsZero(vaddress<1:0>) && match then
            match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF);
    else
        match = FALSE;

    return match;
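The match_word construction above sets one bit per exception vector, indexed by vaddress<4:2> and banked by an offset of 0 (Secure or no EL3), 8 (Monitor), or 24 (Non-secure). A non-normative C sketch of that bit selection (function name illustrative):

#include <stdint.h>

/* Illustrative only: select the DBGVCR match bit for a vector address. */
static uint32_t vcr_match_word(uint32_t vaddress, unsigned bank_offset)
{
    unsigned vector = (vaddress >> 2) & 0x7;       /* UInt(vaddress<4:2>) */
    return UINT32_C(1) << (vector + bank_offset);  /* bank_offset: 0, 8 or 24 */
}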
// AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
// ========================================================

boolean AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled()
    // The definition of this function is IMPLEMENTATION DEFINED.
    // In the recommended interface, AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled returns
    // the state of the (DBGEN AND SPIDEN) signal.
    if !HaveEL(EL3) && !IsSecure() then return FALSE;
    return DBGEN == HIGH && SPIDEN == HIGH;
// AArch32.BreakpointMatch()
// =========================
// Breakpoint matching in an AArch32 translation regime.

(boolean,boolean) AArch32.BreakpointMatch(integer n, bits(32) vaddress, integer size)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n < NumBreakpointsImplemented();

    enabled = DBGBCR[n].E == '1';
    ispriv = PSTATE.EL != EL0;
    linked = DBGBCR[n].BT == '0x01';
    isbreakpnt = TRUE;
    linked_to = FALSE;

    state_match = AArch32.StateMatch(DBGBCR[n].SSC, DBGBCR[n].HMC, DBGBCR[n].PMC,
                                     linked, DBGBCR[n].LBN, isbreakpnt, ispriv);
    (value_match, value_mismatch) = AArch32.BreakpointValueMatch(n, vaddress, linked_to);

    if size == 4 then    // Check second halfword
        // If the breakpoint address and BAS of an Address breakpoint match the address of the
        // second halfword of an instruction, but not the address of the first halfword, it is
        // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug
        // event.
        (match_i, mismatch_i) = AArch32.BreakpointValueMatch(n, vaddress + 2, linked_to);
        if !value_match && match_i then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if value_mismatch && !mismatch_i then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    if vaddress<1> == '1' && DBGBCR[n].BAS == '1111' then
        // The above notwithstanding, if DBGBCR[n].BAS == '1111', then it is CONSTRAINED
        // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction
        // at the address DBGBVR[n]+2.
        if value_match then
            value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF);
        if !value_mismatch then
            value_mismatch = ConstrainUnpredictableBool(Unpredictable_BPMISMATCHHALF);

    match = value_match && state_match && enabled;
    mismatch = value_mismatch && state_match && enabled;

    return (match, mismatch);
// AArch32.BreakpointValueMatch()
// ==============================
// The first result is whether an Address Match or Context breakpoint is programmed on the
// instruction at "address". The second result is whether an Address Mismatch breakpoint is
// programmed on the instruction, that is, whether the instruction should be stepped.

(boolean,boolean) AArch32.BreakpointValueMatch(integer n, bits(32) vaddress, boolean linked_to)
    // "n" is the identity of the breakpoint unit to match against.
    // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context
    //   matching breakpoints.
    // "linked_to" is TRUE if this is a call from StateMatch for linking.

    // If a non-existent breakpoint then it is CONSTRAINED UNPREDICTABLE whether this gives
    // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint.
    if n >= NumBreakpointsImplemented() then
        (c, n) = ConstrainUnpredictableInteger(0, NumBreakpointsImplemented() - 1,
                                               Unpredictable_BPNOTIMPL);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);

    // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a
    // call from StateMatch for linking).
    if DBGBCR[n].E == '0' then return (FALSE,FALSE);

    context_aware = (n >= (NumBreakpointsImplemented() - NumContextAwareBreakpointsImplemented()));

    // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type.
    dbgtype = DBGBCR[n].BT;

    if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching
          (dbgtype == '010x' && HaltOnBreakpointOrWatchpoint()) ||               // Address mismatch
          (dbgtype != '0x0x' && !context_aware) ||                               // Context matching
          (dbgtype == '1xxx' && !HaveEL(EL2))) then                              // EL2 extension
        (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE);
        assert c IN {Constraint_DISABLED, Constraint_UNKNOWN};
        if c == Constraint_DISABLED then return (FALSE,FALSE);
        // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value

    // Determine what to compare against.
    match_addr = (dbgtype == '0x0x');
    mismatch   = (dbgtype == '010x');
    match_vmid = (dbgtype == '10xx');
    match_cid1 = (dbgtype == 'xx1x');
    match_cid2 = (dbgtype == '11xx');
    linked     = (dbgtype == 'xxx1');

    // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a
    // VMID and/or context ID match, or if not context-aware. The above assertions mean that the
    // code can just test for match_addr == TRUE to confirm all these things.
    if linked_to && (!linked || match_addr) then return (FALSE,FALSE);

    // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches.
    if !linked_to && linked && !match_addr then return (FALSE,FALSE);

    // Do the comparison.
    if match_addr then
        byte = UInt(vaddress<1:0>);
        assert byte IN {0,2};    // "vaddress" is halfword aligned
        byte_select_match = (DBGBCR[n].BAS<byte> == '1');
        integer top = 31;
        BVR_match = (vaddress<top:2> == DBGBVR[n]<top:2>) && byte_select_match;
    elsif match_cid1 then
        BVR_match = (PSTATE.EL != EL2 && CONTEXTIDR == DBGBVR[n]<31:0>);
    if match_vmid then
        if ELUsingAArch32(EL2) then
            vmid = ZeroExtend(VTTBR.VMID, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        elsif !Have16bitVMID() || VTCR_EL2.VS == '0' then
            vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16);
            bvr_vmid = ZeroExtend(DBGBXVR[n]<7:0>, 16);
        else
            vmid = VTTBR_EL2.VMID;
            bvr_vmid = DBGBXVR[n]<15:0>;
        BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && vmid == bvr_vmid);
    elsif match_cid2 then
        BXVR_match = (PSTATE.EL != EL3 && (HaveVirtHostExt() || HaveV82Debug()) &&
                      EL2Enabled() && !ELUsingAArch32(EL2) &&
                      DBGBXVR[n]<31:0> == CONTEXTIDR_EL2<31:0>);

    bvr_match_valid = (match_addr || match_cid1);
    bxvr_match_valid = (match_vmid || match_cid2);

    match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match);

    return (match && !mismatch, !match && mismatch);
// AArch32.StateMatch()
// ====================
// Determine whether a breakpoint or watchpoint is enabled in the current mode and state.

boolean AArch32.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN,
                           boolean isbreakpnt, boolean ispriv)
    // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register.
    // "linked" is TRUE if this is a linked breakpoint/watchpoint type.
    // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register.
    // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints.
    // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses.

    // If parameters are set to a reserved type, behaves as either disabled or a defined type
    (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt);
    if c == Constraint_DISABLED then return FALSE;
    // Otherwise the HMC,SSC,PxC values are either valid or the values returned by
    // CheckValidStateMatch are valid.

    PL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11');
    PL1_match = PxC<0> == '1';
    PL0_match = PxC<1> == '1';
    SSU_match = isbreakpnt && HMC == '0' && PxC == '00' && SSC != '11';

    if !ispriv && !isbreakpnt then
        priv_match = PL0_match;
    elsif SSU_match then
        priv_match = PSTATE.M IN {M32_User,M32_Svc,M32_System};
    else
        case PSTATE.EL of
            when EL3  priv_match = PL1_match;    // EL3 and EL1 are both PL1
            when EL2  priv_match = PL2_match;
            when EL1  priv_match = PL1_match;
            when EL0  priv_match = PL0_match;

    case SSC of
        when '00'  security_state_match = TRUE;                        // Both
        when '01'  security_state_match = !IsSecure();                 // Non-secure only
        when '10'  security_state_match = IsSecure();                  // Secure only
        when '11'  security_state_match = (HMC == '1' || IsSecure());  // HMC=1 -> Both, 0 -> Secure only

    if linked then
        // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then
        // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some
        // UNKNOWN breakpoint that is context-aware.
        lbn = UInt(LBN);
        first_ctx_cmp = NumBreakpointsImplemented() - NumContextAwareBreakpointsImplemented();
        last_ctx_cmp = NumBreakpointsImplemented() - 1;
        if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then
            (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp,
                                                     Unpredictable_BPNOTCTXCMP);
            assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
            case c of
                when Constraint_DISABLED  return FALSE;      // Disabled
                when Constraint_NONE      linked = FALSE;    // No linking
                // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint

    if linked then
        vaddress = bits(32) UNKNOWN;
        linked_to = TRUE;
        (linked_match,-) = AArch32.BreakpointValueMatch(lbn, vaddress, linked_to);

    return priv_match && security_state_match && (!linked || linked_match);
// AArch32.GenerateDebugExceptions()
// =================================

boolean AArch32.GenerateDebugExceptions()
    return AArch32.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure());
// AArch32.GenerateDebugExceptionsFrom()
// =====================================

boolean AArch32.GenerateDebugExceptionsFrom(bits(2) from, boolean secure)

    if !ELUsingAArch32(DebugTargetFrom(secure)) then
        mask = '0';    // No PSTATE.D in AArch32 state
        return AArch64.GenerateDebugExceptionsFrom(from, secure, mask);

    if DBGOSLSR.OSLK == '1' || DoubleLockStatus() || Halted() then
        return FALSE;

    if HaveEL(EL3) && secure then
        assert from != EL2;    // Secure EL2 always uses AArch64
        if IsSecureEL2Enabled() then
            // Implies that EL3 and EL2 both using AArch64
            enabled = MDCR_EL3.SDD == '0';
        else
            spd = if ELUsingAArch32(EL3) then SDCR.SPD else MDCR_EL3.SPD32;
            if spd<1> == '1' then
                enabled = spd<0> == '1';
            else
                // SPD == 0b01 is reserved, but behaves the same as 0b00.
                enabled = AArch32.SelfHostedSecurePrivilegedInvasiveDebugEnabled();
        if from == EL0 then enabled = enabled || SDER.SUIDEN == '1';
    else
        enabled = from != EL2;

    return enabled;
// AArch32.CheckForPMUOverflow()
// =============================
// Signal Performance Monitors overflow IRQ and CTI overflow events

AArch32.CheckForPMUOverflow()

    if !ELUsingAArch32(EL1) then
        AArch64.CheckForPMUOverflow();
        return;

    if HaveEL(EL2) then
        hpme = if !ELUsingAArch32(EL2) then MDCR_EL2.HPME else HDCR.HPME;
    pmuirq = PMCR.E == '1' && PMINTENSET.C == '1' && PMOVSSET.C == '1';
    for idx = 0 to GetNumEventCounters() - 1
        E = if AArch32.PMUCounterIsHyp(idx) then hpme else PMCR.E;
        if E == '1' && PMINTENSET<idx> == '1' && PMOVSSET<idx> == '1' then pmuirq = TRUE;

    SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW);

    CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW);

    // The request remains set until the condition is cleared. (For example, an interrupt handler
    // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR.)
// AArch32.ClearEventCounters()
// ============================
// Zero all the event counters.

AArch32.ClearEventCounters()
    if HaveAArch64() then
        // Force the counter to be cleared as a 64-bit counter.
        AArch64.ClearEventCounters();
        return;
    for idx = 0 to AArch32.GetNumEventCountersAccessible() - 1
        PMEVCNTR[idx] = Zeros();
// AArch32.CountPMUEvents()
// ========================
// Return TRUE if counter "idx" should count its event.
// For the cycle counter, idx == CYCLE_COUNTER_ID.

boolean AArch32.CountPMUEvents(integer idx)
    assert idx == CYCLE_COUNTER_ID || idx < GetNumEventCounters();

    if !ELUsingAArch32(EL1) then return AArch64.CountPMUEvents(idx);

    // Event counting is disabled in Debug state
    debug = Halted();

    // Software can reserve some counters for EL2
    resvd_for_el2 = AArch32.PMUCounterIsHyp(idx);

    // Main enable controls
    if idx == CYCLE_COUNTER_ID then
        enabled = PMCR.E == '1' && PMCNTENSET.C == '1';
    else
        if resvd_for_el2 then
            E = if ELUsingAArch32(EL2) then HDCR.HPME else MDCR_EL2.HPME;
        else
            E = PMCR.E;
        enabled = E == '1' && PMCNTENSET<idx> == '1';

    // Event counting is allowed unless it is prohibited by any rule below
    prohibited = FALSE;

    // Event counting in Secure state is prohibited if all of:
    // * EL3 is implemented
    // * One of the following is true:
    //   - EL3 is using AArch64, MDCR_EL3.SPME == 0, and either:
    //     - FEAT_PMUv3p7 is not implemented
    //     - MDCR_EL3.MPMX == 0
    //   - EL3 is using AArch32 and SDCR.SPME == 0
    // * Not executing at EL0, or SDER.SUNIDEN == 0
    if HaveEL(EL3) && IsSecure() then
        spme = if ELUsingAArch32(EL3) then SDCR.SPME else MDCR_EL3.SPME;
        if !ELUsingAArch32(EL3) && HavePMUv3p7() then
            prohibited = spme == '0' && MDCR_EL3.MPMX == '0';
        else
            prohibited = spme == '0';
        if prohibited && PSTATE.EL == EL0 then
            prohibited = SDER.SUNIDEN == '0';

    // Event counting at EL2 is prohibited if all of:
    // * The HPMD Extension is implemented
    // * PMNx is not reserved for EL2
    // * HDCR.HPMD == 1
    if !prohibited && PSTATE.EL == EL2 && HaveHPMDExt() && !resvd_for_el2 then
        prohibited = HDCR.HPMD == '1';

    // The IMPLEMENTATION DEFINED authentication interface might override software
    if prohibited && !HaveNoSecurePMUDisableOverride() then
        prohibited = !ExternalSecureNoninvasiveDebugEnabled();

    // PMCR.DP disables the cycle counter when event counting is prohibited
    if prohibited && idx == CYCLE_COUNTER_ID then
        enabled = enabled && (PMCR.DP == '0');
        prohibited = FALSE;    // Otherwise whether event counting is prohibited does not affect
                               // the cycle counter

    // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited.
    // This is not overridden by PMCR.DP.
    if HavePMUv3p5() && idx == CYCLE_COUNTER_ID then
        if HaveEL(EL3) && IsSecure() then
            sccd = if ELUsingAArch32(EL3) then SDCR.SCCD else MDCR_EL3.SCCD;
            if sccd == '1' then
                prohibited = TRUE;
        if PSTATE.EL == EL2 && HDCR.HCCD == '1' then
            prohibited = TRUE;

    // Event counting might be frozen
    frozen = FALSE;

    // If FEAT_PMUv3p7 is implemented, event counting can be frozen
    if HavePMUv3p7() && idx != CYCLE_COUNTER_ID then
        if HaveEL(EL2) then
            hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
        ovflws = ZeroExtend(PMOVSSET<GetNumEventCounters()-1:0>);
        if resvd_for_el2 then
            FZ = if ELUsingAArch32(EL2) then HDCR.HPMFZO else MDCR_EL2.HPMFZO;
            ovflws<UInt(hpmn)-1:0> = Zeros();
        else
            FZ = PMCR.FZO;
            if HaveEL(EL2) && UInt(hpmn) < GetNumEventCounters() then
                ovflws<GetNumEventCounters()-1:UInt(hpmn)> = Zeros();
        frozen = FZ == '1' && !IsZero(ovflws);

    // Event counting can be filtered by the {P, U, NSK, NSU, NSH} bits
    filter = if idx == CYCLE_COUNTER_ID then PMCCFILTR else PMEVTYPER[idx];

    P = filter<31>;
    U = filter<30>;
    NSK = if HaveEL(EL3) then filter<29> else '0';
    NSU = if HaveEL(EL3) then filter<28> else '0';
    NSH = if HaveEL(EL2) then filter<27> else '0';

    case PSTATE.EL of
        when EL0  filtered = if IsSecure() then U == '1' else U != NSU;
        when EL1  filtered = if IsSecure() then P == '1' else P != NSK;
        when EL2  filtered = NSH == '0';
        when EL3  filtered = P == '1';

    return !debug && enabled && !prohibited && !filtered && !frozen;
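The {P, U, NSK, NSU, NSH} filtering at the end of AArch32.CountPMUEvents() can be read as a small truth table. A non-normative C sketch of just that step, assuming an implementation with both EL2 and EL3 (names illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: returns true if the event is filtered out (not counted).
 * "el" is the current Exception level 0..3; "secure" is the Security state. */
static bool pmu_event_filtered(uint32_t filter, int el, bool secure)
{
    bool P   = (filter >> 31) & 1;
    bool U   = (filter >> 30) & 1;
    bool NSK = (filter >> 29) & 1;
    bool NSU = (filter >> 28) & 1;
    bool NSH = (filter >> 27) & 1;
    switch (el) {
    case 0:  return secure ? U : (U != NSU);
    case 1:  return secure ? P : (P != NSK);
    case 2:  return !NSH;
    default: return P;    /* EL3 */
    }
}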
// AArch32.GetNumEventCountersAccessible()
// =======================================
// Return the number of event counters that can be accessed at the current Exception level.

integer AArch32.GetNumEventCountersAccessible()

    // Software can reserve some counters for EL2
    if PSTATE.EL IN {EL1, EL0} && EL2Enabled() then
        n = UInt(if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN);
    else
        n = GetNumEventCounters();

    return n;
// AArch32.IncrementEventCounter()
// ===============================
// Increment the specified event counter by the specified amount.

AArch32.IncrementEventCounter(integer idx, integer increment)
    if HaveAArch64() then
        // Force the counter to be incremented as a 64-bit counter.
        AArch64.IncrementEventCounter(idx, increment);
        return;

    // In this model, event counters in an AArch32-only implementation are 32 bits and
    // the LP bits are RES0, even if FEAT_PMUv3p5 is implemented.
    old_value = UInt(PMEVCNTR[idx]);
    new_value = old_value + increment;
    PMEVCNTR[idx] = new_value<31:0>;

    ovflw = 32;
    if old_value<64:ovflw> != new_value<64:ovflw> then
        PMOVSSET<idx> = '1';
        PMOVSR<idx> = '1';
        // Check for the CHAIN event from an even counter
        if idx<0> == '0' && idx + 1 < GetNumEventCounters() then
            AArch32.PMUEvent(PMU_EVENT_CHAIN, 1, idx + 1);
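The overflow test above compares everything above bit 31 of the unbounded old and new values; the counter overflowed exactly when they differ. A non-normative C sketch of the same test (names illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: detect 32-bit event counter overflow, mirroring
 * old_value<64:32> != new_value<64:32> in the pseudocode above. */
static bool event_counter_overflowed(uint64_t old_value, uint32_t increment)
{
    uint64_t new_value = old_value + increment;
    return ((old_value ^ new_value) >> 32) != 0;
}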
// AArch32.PMUCounterIsHyp()
// =========================
// Returns TRUE if a counter is reserved for use by EL2, FALSE otherwise.

boolean AArch32.PMUCounterIsHyp(integer n)

    // Software can reserve some counters for EL2
    if HaveEL(EL2) then
        hpmn = if !ELUsingAArch32(EL2) then MDCR_EL2.HPMN else HDCR.HPMN;
        resvd_for_el2 = n >= UInt(hpmn) && n != CYCLE_COUNTER_ID;
        if !HaveFeatHPMN0() && hpmn == '00000' then
            resvd_for_el2 = boolean UNKNOWN;
    else
        resvd_for_el2 = FALSE;

    return resvd_for_el2;
// AArch32.PMUCycle()
// ==================

AArch32.PMUCycle()
    if !HavePMUv3() || !AArch32.CountPMUEvents(CYCLE_COUNTER_ID) then
        return;

    if PMCR.LC == '0' && PMCR.D == '1' && !HasElapsed64Cycles() then
        PMUEvent(PMU_EVENT_CPU_CYCLES);
        return;

    old_value = UInt(PMCCNTR);
    new_value = old_value + 1;
    PMCCNTR = new_value<63:0>;
    ovflw = if PMCR.LC == '1' then 64 else 32;

    if old_value<64:ovflw> != new_value<64:ovflw> then
        PMOVSSET.C = '1';
        PMOVSR.C = '1';
        AArch32.CheckForPMUOverflow();

    PMUEvent(PMU_EVENT_CPU_CYCLES);
// AArch32.PMUEvent()
// ==================
// Generate a PMU Event. All the event counters are checked for the event.
// If any of the counters overflow then an interrupt is raised.

AArch32.PMUEvent(bits(16) event, integer increment)
    if !HavePMUv3() then return;

    // Count the event
    for idx = 0 to GetNumEventCounters() - 1
        if PMEVTYPER[idx].evtCount == event && AArch32.CountPMUEvents(idx) then
            AArch32.IncrementEventCounter(idx, increment);

    AArch32.CheckForPMUOverflow();

// AArch32.PMUEvent()
// ==================
// Generate a PMU Event for a specific event counter.

AArch32.PMUEvent(bits(16) event, integer increment, integer idx)
    if !HavePMUv3() then return;

    // Count the event
    if PMEVTYPER[idx].evtCount == event && AArch32.CountPMUEvents(idx) then
        AArch32.IncrementEventCounter(idx, increment);

    // This function is only called from other functions which will check for overflow later
// AArch32.PMUSwIncrement()
// ========================
// Generate PMU Events on a write to PMSWINC.

AArch32.PMUSwIncrement(bits(32) sw_incr)
    for idx = 0 to AArch32.GetNumEventCountersAccessible() - 1
        if sw_incr<idx> == '1' then
            AArch32.PMUEvent(PMU_EVENT_SW_INCR, 1, idx);

    AArch32.CheckForPMUOverflow();
// AArch32.EnterHypModeInDebugState()
// ==================================
// Take an exception in Debug state to Hyp mode.

AArch32.EnterHypModeInDebugState(ExceptionRecord exception)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = bits(32) UNKNOWN;
    ELR_hyp = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';    // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();

    EndOfInstruction();
// AArch32.EnterModeInDebugState()
// ===============================
// Take an exception in Debug state to a mode other than Monitor and Hyp mode.

AArch32.EnterModeInDebugState(bits(5) target_mode)
    SynchronizeContext();
    assert ELUsingAArch32(EL1) && PSTATE.EL != EL2;

    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(target_mode);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';    // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() && SCTLR.SPAN == '0' then
        PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();    // Update EDSCR processor state flags.

    EndOfInstruction();
// AArch32.EnterMonitorModeInDebugState()
// ======================================
// Take an exception in Debug state to Monitor mode.

AArch32.EnterMonitorModeInDebugState()
    SynchronizeContext();
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    from_secure = IsSecure();
    if PSTATE.M == M32_Monitor then SCR.NS = '0';
    AArch32.WriteMode(M32_Monitor);
    SPSR[] = bits(32) UNKNOWN;
    R[14] = bits(32) UNKNOWN;
    // In Debug state, the PE always executes T32 instructions when in AArch32 state, and
    // PSTATE.{SS,A,I,F} are not observable so behave as UNKNOWN.
    PSTATE.T = '1';    // PSTATE.J is RES0
    PSTATE.<SS,A,I,F> = bits(4) UNKNOWN;
    PSTATE.E = SCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HavePANExt() then
        if !from_secure then
            PSTATE.PAN = '0';
        elsif SCTLR.SPAN == '0' then
            PSTATE.PAN = '1';
    if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN;
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;
    EDSCR.ERR = '1';
    UpdateEDSCRFields();    // Update EDSCR processor state flags.

    EndOfInstruction();
// AArch32.WatchpointByteMatch()
// =============================

boolean AArch32.WatchpointByteMatch(integer n, bits(32) vaddress)

    integer top = 31;
    bottom = if DBGWVR[n]<2> == '1' then 2 else 3;    // Word or doubleword
    byte_select_match = (DBGWCR[n].BAS<UInt(vaddress<bottom-1:0>)> != '0');
    mask = UInt(DBGWCR[n].MASK);

    // If DBGWCR[n].MASK is a non-zero value and DBGWCR[n].BAS is not set to '11111111', or
    // DBGWCR[n].BAS specifies a non-contiguous set of bytes, behavior is CONSTRAINED
    // UNPREDICTABLE.
    if mask > 0 && !IsOnes(DBGWCR[n].BAS) then
        byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS);
    else
        LSB = (DBGWCR[n].BAS AND NOT(DBGWCR[n].BAS - 1));
        MSB = (DBGWCR[n].BAS + LSB);
        if !IsZero(MSB AND (MSB - 1)) then    // Not contiguous
            byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
            bottom = 3;    // For the whole doubleword

    // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE.
    if mask > 0 && mask <= 2 then
        (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
        assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN};
        case c of
            when Constraint_DISABLED  return FALSE;    // Disabled
            when Constraint_NONE      mask = 0;        // No masking
            // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value

    if mask > bottom then
        // If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB
        // of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be
        // included in the match.
        if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then
            if ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS) then
                top = 63;
        WVR_match = (vaddress<top:mask> == DBGWVR[n]<top:mask>);
        // If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE.
        if WVR_match && !IsZero(DBGWVR[n]<mask-1:bottom>) then
            WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS);
    else
        WVR_match = vaddress<top:bottom> == DBGWVR[n]<top:bottom>;

    return WVR_match && byte_select_match;
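The LSB/MSB manipulation above is the standard bit trick for checking that DBGWCR[n].BAS selects a contiguous run of bytes: adding the lowest set bit to the value yields a power of two exactly when the set bits are contiguous. A non-normative C sketch (name illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: true if the 8-bit BAS field selects a contiguous run. */
static bool bas_is_contiguous(uint8_t bas)
{
    uint8_t  lsb = bas & (uint8_t)(-bas);    /* BAS AND NOT(BAS - 1)      */
    uint16_t msb = (uint16_t)bas + lsb;      /* may carry into bit 8      */
    return (msb & (msb - 1)) == 0;           /* IsZero(MSB AND (MSB - 1)) */
}

For example, BAS = 0b00111100 gives LSB = 0b100 and MSB = 0b1000000, a power of two, so the run is contiguous; BAS = 0b0101 gives MSB = 0b110, which is not.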
// AArch32.WatchpointMatch()
// =========================
// Watchpoint matching in an AArch32 translation regime.

boolean AArch32.WatchpointMatch(integer n, bits(32) vaddress, integer size, boolean ispriv,
                                AccType acctype, boolean iswrite)
    assert ELUsingAArch32(S1TranslationRegime());
    assert n < NumWatchpointsImplemented();

    // "ispriv" is:
    // * FALSE for all loads, stores, and atomic operations executed at EL0.
    // * FALSE if the access is unprivileged.
    // * TRUE for all other loads, stores, and atomic operations.
    enabled = DBGWCR[n].E == '1';
    linked = DBGWCR[n].WT == '1';
    isbreakpnt = FALSE;

    state_match = AArch32.StateMatch(DBGWCR[n].SSC, DBGWCR[n].HMC, DBGWCR[n].PAC,
                                     linked, DBGWCR[n].LBN, isbreakpnt, ispriv);

    ls_match = (DBGWCR[n].LSC<(if iswrite then 1 else 0)> == '1');

    value_match = FALSE;
    for byte = 0 to size - 1
        value_match = value_match || AArch32.WatchpointByteMatch(n, vaddress + byte);

    return value_match && state_match && ls_match && enabled;
// AArch32.Abort()
// ===============
// Abort and Debug exception handling in an AArch32 translation regime.

AArch32.Abort(bits(32) vaddress, FaultRecord fault)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);

    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || IsSecondStage(fault) ||
                            (HaveRASExt() && HCR_EL2.TEA == '1' && IsExternalAbort(fault)) ||
                            (IsDebugException(fault) && MDCR_EL2.TDE == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1' && IsExternalAbort(fault);

    if route_to_aarch64 then
        AArch64.Abort(ZeroExtend(vaddress), fault);
    elsif fault.acctype == AccType_IFETCH then
        AArch32.TakePrefetchAbortException(vaddress, fault);
    else
        AArch32.TakeDataAbortException(vaddress, fault);
// AArch32.AbortSyndrome()
// =======================
// Creates an exception syndrome record for Abort exceptions taken to Hyp mode
// from an AArch32 translation regime.

ExceptionRecord AArch32.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(32) vaddress)
    exception = ExceptionSyndrome(exceptype);

    d_side = exceptype == Exception_DataAbort;

    exception.syndrome = AArch32.FaultSyndrome(d_side, fault);
    exception.vaddress = ZeroExtend(vaddress);
    if IPAValid(fault) then
        exception.ipavalid = TRUE;
        exception.NS = if fault.ipaddress.paspace == PAS_NonSecure then '1' else '0';
        exception.ipaddress = ZeroExtend(fault.ipaddress.address);
    else
        exception.ipavalid = FALSE;

    return exception;
// AArch32.CheckPCAlignment()
// ==========================

AArch32.CheckPCAlignment()

    bits(32) pc = ThisInstrAddr();
    if (CurrentInstrSet() == InstrSet_A32 && pc<1> == '1') || pc<0> == '1' then
        if AArch32.GeneralExceptionsToAArch64() then AArch64.PCAlignmentFault();

        // Generate an Alignment fault Prefetch Abort exception
        vaddress = pc;
        acctype = AccType_IFETCH;
        iswrite = FALSE;
        secondstage = FALSE;
        AArch32.Abort(vaddress, AlignmentFault(acctype, iswrite, secondstage));
// AArch32.ReportDataAbort()
// =========================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportDataAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)

    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = ((TTBCR_S.EAE == '1') ||
                       (IsExternalSyncAbort(fault) &&
                        ((PSTATE.EL == EL2 || TTBCR.EAE == '1') ||
                         (fault.secondstage && boolean IMPLEMENTATION_DEFINED
                          "Stage 2 synchronous external abort reports using Long-descriptor format when TTBCR_S.EAE is 0b0"))));
    else
        long_format = TTBCR.EAE == '1';

    d_side = TRUE;
    if long_format then
        syndrome = AArch32.FaultStatusLD(d_side, fault);
    else
        syndrome = AArch32.FaultStatusSD(d_side, fault);

    if fault.acctype == AccType_IC then
        if (!long_format &&
                boolean IMPLEMENTATION_DEFINED "Report I-cache maintenance fault in IFSR") then
            i_syndrome = syndrome;
            syndrome<10,3:0> = EncodeSDFSC(Fault_ICacheMaint, 1);
        else
            i_syndrome = bits(32) UNKNOWN;
        if route_to_monitor then
            IFSR_S = i_syndrome;
        else
            IFSR = i_syndrome;

    if route_to_monitor then
        DFSR_S = syndrome;
        DFAR_S = vaddress;
    else
        DFSR = syndrome;
        DFAR = vaddress;

    return;
// AArch32.ReportPrefetchAbort()
// =============================
// Report syndrome information for aborts taken to modes other than Hyp mode.

AArch32.ReportPrefetchAbort(boolean route_to_monitor, FaultRecord fault, bits(32) vaddress)
    // The encoding used in the IFSR can be Long-descriptor format or Short-descriptor format.
    // Normally, the current translation table format determines the format. For an abort from
    // Non-secure state to Monitor mode, the IFSR uses the Long-descriptor format if any of the
    // following applies:
    // * The Secure TTBCR.EAE is set to 1.
    // * It is taken from Hyp mode.
    // * It is taken from EL1 or EL0, and the Non-secure TTBCR.EAE is set to 1.
    long_format = FALSE;
    if route_to_monitor && !IsSecure() then
        long_format = TTBCR_S.EAE == '1' || PSTATE.EL == EL2 || TTBCR.EAE == '1';
    else
        long_format = TTBCR.EAE == '1';

    d_side = FALSE;
    if long_format then
        fsr = AArch32.FaultStatusLD(d_side, fault);
    else
        fsr = AArch32.FaultStatusSD(d_side, fault);

    if route_to_monitor then
        IFSR_S = fsr;
        IFAR_S = vaddress;
    else
        IFSR = fsr;
        IFAR = vaddress;

    return;
// AArch32.TakeDataAbortException()
// ================================

AArch32.TakeDataAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (EL2Enabled() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1') ||
                     IsSecondStage(fault)));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePrefetchAbortException()
// ====================================

AArch32.TakePrefetchAbortException(bits(32) vaddress, FaultRecord fault)
    route_to_monitor = HaveEL(EL3) && SCR.EA == '1' && IsExternalAbort(fault);
    route_to_hyp = (EL2Enabled() && PSTATE.EL IN {EL0, EL1} &&
                    (HCR.TGE == '1' ||
                     (HaveRASExt() && HCR2.TEA == '1' && IsExternalAbort(fault)) ||
                     (IsDebugException(fault) && HDCR.TDE == '1') ||
                     IsSecondStage(fault)));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x0C;
    lr_offset = 4;

    if IsDebugException(fault) then DBGDSCRext.MOE = fault.debugmoe;
    if route_to_monitor then
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        if fault.statuscode == Fault_Alignment then    // PC Alignment fault
            exception = ExceptionSyndrome(Exception_PCAlignment);
            exception.vaddress = ThisInstrAddr();
        else
            exception = AArch32.AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.ReportPrefetchAbort(route_to_monitor, fault, vaddress);
        AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalFIQException()
// ==================================

AArch32.TakePhysicalFIQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.FMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.FIQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalFIQException();

    route_to_monitor = HaveEL(EL3) && SCR.FIQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.FMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_FIQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalIRQException()
// ==================================
// Take an enabled physical IRQ exception.

AArch32.TakePhysicalIRQException()

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || (HCR_EL2.IMO == '1' && !IsInHost());
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.IRQ == '1';

    if route_to_aarch64 then AArch64.TakePhysicalIRQException();

    route_to_monitor = HaveEL(EL3) && SCR.IRQ == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.IMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;
    if route_to_monitor then
        AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
    elsif PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_IRQ);
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakePhysicalSErrorException()
// =====================================

AArch32.TakePhysicalSErrorException(boolean parity, bit extflag, bits(2) pe_error_state,
                                    bits(25) full_syndrome)

    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = (HCR_EL2.TGE == '1' || (!IsInHost() && HCR_EL2.AMO == '1'));

    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.TakePhysicalSErrorException(full_syndrome);

    route_to_monitor = HaveEL(EL3) && SCR.EA == '1';
    route_to_hyp = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() &&
                    (HCR.TGE == '1' || HCR.AMO == '1'));
    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    bits(2) target_el;
    if route_to_monitor then
        target_el = EL3;
    elsif PSTATE.EL == EL2 || route_to_hyp then
        target_el = EL2;
    else
        target_el = EL1;

    if IsSErrorEdgeTriggered(target_el, full_syndrome) then
        ClearPendingPhysicalSError();

    fault = AsyncExternalAbort(parity, pe_error_state, extflag);
    vaddress = bits(32) UNKNOWN;

    case target_el of
        when EL3
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
        when EL2
            exception = AArch32.AbortSyndrome(Exception_DataAbort, fault, vaddress);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        when EL1
            AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
            AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
        otherwise
            Unreachable();
// AArch32.TakeVirtualFIQException()
// =================================

AArch32.TakeVirtualFIQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual FIQ enabled if TGE==0 and FMO==1
        assert HCR.TGE == '0' && HCR.FMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.FMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualFIQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x1C;
    lr_offset = 4;

    AArch32.EnterMode(M32_FIQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualIRQException()
// =================================

AArch32.TakeVirtualIRQException()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual IRQs enabled if TGE==0 and IMO==1
        assert HCR.TGE == '0' && HCR.IMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.IMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then AArch64.TakeVirtualIRQException();

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x18;
    lr_offset = 4;

    AArch32.EnterMode(M32_IRQ, preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeVirtualSErrorException()
// ====================================

AArch32.TakeVirtualSErrorException(bit extflag, bits(2) pe_error_state, bits(25) full_syndrome)

    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();
    if ELUsingAArch32(EL2) then    // Virtual SError enabled if TGE==0 and AMO==1
        assert HCR.TGE == '0' && HCR.AMO == '1';
    else
        assert HCR_EL2.TGE == '0' && HCR_EL2.AMO == '1';

    // Check if routed to AArch64 state
    if PSTATE.EL == EL0 && !ELUsingAArch32(EL1) then
        AArch64.TakeVirtualSErrorException(full_syndrome);

    route_to_monitor = FALSE;

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x10;
    lr_offset = 8;

    vaddress = bits(32) UNKNOWN;
    parity = FALSE;
    if HaveRASExt() then
        if ELUsingAArch32(EL2) then
            fault = AsyncExternalAbort(FALSE, VDFSR.AET, VDFSR.ExT);
        else
            fault = AsyncExternalAbort(FALSE, VSESR_EL2.AET, VSESR_EL2.ExT);
    else
        fault = AsyncExternalAbort(parity, pe_error_state, extflag);

    ClearPendingVirtualSError();
    AArch32.ReportDataAbort(route_to_monitor, fault, vaddress);
    AArch32.EnterMode(M32_Abort, preferred_exception_return, lr_offset, vect_offset);
// AArch32.SoftwareBreakpoint()
// ============================

AArch32.SoftwareBreakpoint(bits(16) immediate)

    if (EL2Enabled() && !ELUsingAArch32(EL2) &&
            (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1')) || !ELUsingAArch32(EL1) then
        AArch64.SoftwareBreakpoint(immediate);
    vaddress = bits(32) UNKNOWN;
    acctype = AccType_IFETCH;    // Take as a Prefetch Abort
    iswrite = FALSE;
    entry = DebugException_BKPT;

    fault = AArch32.DebugFault(acctype, iswrite, entry);
    AArch32.Abort(vaddress, fault);
constant bits(4) DebugException_Breakpoint  = '0001';
constant bits(4) DebugException_BKPT        = '0011';
constant bits(4) DebugException_VectorCatch = '0101';
constant bits(4) DebugException_Watchpoint  = '1010';
// AArch32.CheckAdvSIMDOrFPRegisterTraps()
// =======================================
// Check if an instruction that accesses an Advanced SIMD and
// floating-point System register is trapped by an appropriate HCR.TIDx
// ID group trap control.

AArch32.CheckAdvSIMDOrFPRegisterTraps(bits(4) reg)

    if PSTATE.EL == EL1 && EL2Enabled() then
        tid0 = if ELUsingAArch32(EL2) then HCR.TID0 else HCR_EL2.TID0;
        tid3 = if ELUsingAArch32(EL2) then HCR.TID3 else HCR_EL2.TID3;

        if (tid0 == '1' && reg == '0000')                               // FPSID
              || (tid3 == '1' && reg IN {'0101', '0110', '0111'}) then  // MVFRx
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x8);      // Exception_AdvSIMDFPAccessTrap
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x8);   // Exception_AdvSIMDFPAccessTrap
// AArch32.ExceptionClass()
// ========================
// Returns the Exception Class and Instruction Length fields to be reported in HSR

(integer,bit) AArch32.ExceptionClass(Exception exceptype)

    il_is_valid = TRUE;
    case exceptype of
        when Exception_Uncategorized       ec = 0x00; il_is_valid = FALSE;
        when Exception_WFxTrap             ec = 0x01;
        when Exception_CP15RTTrap          ec = 0x03;
        when Exception_CP15RRTTrap         ec = 0x04;
        when Exception_CP14RTTrap          ec = 0x05;
        when Exception_CP14DTTrap          ec = 0x06;
        when Exception_AdvSIMDFPAccessTrap ec = 0x07;
        when Exception_FPIDTrap            ec = 0x08;
        when Exception_PACTrap             ec = 0x09;
        when Exception_TSTARTAccessTrap    ec = 0x1B;
        when Exception_GPC                 ec = 0x1E;
        when Exception_CP14RRTTrap         ec = 0x0C;
        when Exception_BranchTarget        ec = 0x0D;
        when Exception_IllegalState        ec = 0x0E; il_is_valid = FALSE;
        when Exception_SupervisorCall      ec = 0x11;
        when Exception_HypervisorCall      ec = 0x12;
        when Exception_MonitorCall         ec = 0x13;
        when Exception_InstructionAbort    ec = 0x20; il_is_valid = FALSE;
        when Exception_PCAlignment         ec = 0x22; il_is_valid = FALSE;
        when Exception_DataAbort           ec = 0x24;
        when Exception_NV2DataAbort        ec = 0x25;
        when Exception_FPTrappedException  ec = 0x28;
        otherwise                          Unreachable();

    if ec IN {0x20,0x24} && PSTATE.EL == EL2 then
        ec = ec + 1;

    if il_is_valid then
        il = if ThisInstrLength() == 32 then '1' else '0';
    else
        il = '1';

    return (ec,il);
// AArch32.GeneralExceptionsToAArch64()
// ====================================
// Returns TRUE if exceptions normally routed to EL1 are being handled at an Exception
// level using AArch64, because either EL1 is using AArch64 or TGE is in force and EL2
// is using AArch64.

boolean AArch32.GeneralExceptionsToAArch64()
    return ((PSTATE.EL == EL0 && !ELUsingAArch32(EL1)) ||
            (EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1'));
// AArch32.ReportHypEntry()
// ========================
// Report syndrome information to Hyp mode registers.

AArch32.ReportHypEntry(ExceptionRecord exception)

    Exception exceptype = exception.exceptype;

    (ec,il) = AArch32.ExceptionClass(exceptype);
    iss = exception.syndrome;

    // IL is not valid for Data Abort exceptions without valid instruction syndrome information
    if ec IN {0x24,0x25} && iss<24> == '0' then il = '1';

    HSR = ec<5:0>:il:iss;

    if exceptype IN {Exception_InstructionAbort, Exception_PCAlignment} then
        HIFAR = exception.vaddress<31:0>;
        HDFAR = bits(32) UNKNOWN;
    elsif exceptype == Exception_DataAbort then
        HIFAR = bits(32) UNKNOWN;
        HDFAR = exception.vaddress<31:0>;

    if exception.ipavalid then
        HPFAR<31:4> = exception.ipaddress<39:12>;
    else
        HPFAR<31:4> = bits(28) UNKNOWN;

    return;
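The packing performed by AArch32.ReportHypEntry() is mechanical: HSR is EC<5:0> : IL : ISS<24:0>, and HPFAR<31:4> holds bits <39:12> of the faulting IPA. A non-normative C sketch of both packings (function names illustrative):

#include <stdint.h>

/* Illustrative only: HSR = EC<5:0> : IL : ISS<24:0>. */
static uint32_t make_hsr(uint32_t ec, uint32_t il, uint32_t iss)
{
    return ((ec & 0x3Fu) << 26) | ((il & 1u) << 25) | (iss & 0x1FFFFFFu);
}

/* Illustrative only: HPFAR<31:4> = IPA<39:12>. */
static uint32_t make_hpfar(uint64_t ipa)
{
    return (uint32_t)(((ipa >> 12) & 0xFFFFFFFu) << 4);
}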
// Resets System registers and memory-mapped control registers that have architecturally-defined
// reset values to those values.
AArch32.ResetControlRegisters(boolean cold_reset);
// AArch32.TakeReset()
// ===================
// Reset into AArch32 state

AArch32.TakeReset(boolean cold_reset)
    assert !HaveAArch64();

    // Enter the highest implemented Exception level in AArch32 state
    if HaveEL(EL3) then
        AArch32.WriteMode(M32_Svc);
        SCR.NS = '0';    // Secure state
    elsif HaveEL(EL2) then
        AArch32.WriteMode(M32_Hyp);
    else
        AArch32.WriteMode(M32_Svc);

    // Reset System registers in the coproc=0b111x encoding space and other system components
    AArch32.ResetControlRegisters(cold_reset);
    FPEXC.EN = '0';

    // Reset all other PSTATE fields, including instruction set and endianness according to the
    // SCTLR values produced by the above call to ResetControlRegisters()
    PSTATE.<A,I,F> = '111';    // All asynchronous exceptions masked
    PSTATE.IT = '00000000';    // IT block state reset
    if HaveEL(EL2) && !HaveEL(EL3) then
        PSTATE.T = HSCTLR.TE;    // Instruction set: TE=0: A32, TE=1: T32. PSTATE.J is RES0.
        PSTATE.E = HSCTLR.EE;    // Endianness: EE=0: little-endian, EE=1: big-endian
    else
        PSTATE.T = SCTLR.TE;     // Instruction set: TE=0: A32, TE=1: T32. PSTATE.J is RES0.
        PSTATE.E = SCTLR.EE;     // Endianness: EE=0: little-endian, EE=1: big-endian
    PSTATE.IL = '0';             // Clear Illegal Execution state bit

    // All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call
    // below are UNKNOWN bitstrings after reset. In particular, the return information registers
    // R14 or ELR_hyp and SPSR have UNKNOWN values, so that it is impossible to return from a
    // reset in an architecturally defined way.
    AArch32.ResetGeneralRegisters();
    AArch32.ResetSIMDFPRegisters();
    AArch32.ResetSpecialRegisters();
    ResetExternalDebugRegisters(cold_reset);

    bits(32) rv;    // IMPLEMENTATION DEFINED reset vector

    if HaveEL(EL3) then
        if MVBAR<0> == '1' then    // Reset vector in MVBAR
            rv = MVBAR<31:1>:'0';
        else
            rv = bits(32) IMPLEMENTATION_DEFINED "reset vector address";
    else
        rv = RVBAR<31:1>:'0';

    // The reset vector must be correctly aligned
    assert rv<0> == '0' && (PSTATE.T == '1' || rv<1> == '0');

    boolean branch_conditional = FALSE;
    BranchTo(rv, BranchType_RESET, branch_conditional);
// ExcVectorBase()
// ===============

bits(32) ExcVectorBase()
    if SCTLR.V == '1' then    // Hivecs selected, base = 0xFFFF0000
        return Ones(16):Zeros(16);
    else
        return VBAR<31:5>:Zeros(5);
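ExcVectorBase() simply selects between the hivecs base and VBAR with its low five bits zeroed. A non-normative C equivalent (name illustrative):

#include <stdint.h>

/* Illustrative only: hivecs (SCTLR.V == 1) forces 0xFFFF0000, otherwise the
 * base is VBAR<31:5> with zeros in bits <4:0>. */
static uint32_t exc_vector_base(int sctlr_v, uint32_t vbar)
{
    return sctlr_v ? 0xFFFF0000u : (vbar & ~0x1Fu);
}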
// AArch32.FPTrappedException()
// ============================

AArch32.FPTrappedException(bits(8) accumulated_exceptions)
    if AArch32.GeneralExceptionsToAArch64() then
        is_ase = FALSE;
        element = 0;
        AArch64.FPTrappedException(is_ase, accumulated_exceptions);
    FPEXC.DEX = '1';
    FPEXC.TFV = '1';
    FPEXC<7,4:0> = accumulated_exceptions<7,4:0>;    // IDF,IXF,UFF,OFF,DZF,IOF
    FPEXC<10:8> = '111';                             // VECITR is RES1

    AArch32.TakeUndefInstrException();
// AArch32.CallHypervisor()
// ========================
// Performs an HVC call

AArch32.CallHypervisor(bits(16) immediate)
    assert HaveEL(EL2);

    if !ELUsingAArch32(EL2) then
        AArch64.CallHypervisor(immediate);
    else
        AArch32.TakeHVCException(immediate);
// AArch32.CallSupervisor()
// ========================
// Calls the Supervisor

AArch32.CallSupervisor(bits(16) immediate)

    if AArch32.CurrentCond() != '1110' then
        immediate = bits(16) UNKNOWN;
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CallSupervisor(immediate);
    else
        AArch32.TakeSVCException(immediate);
// AArch32.TakeHVCException()
// ==========================

AArch32.TakeHVCException(bits(16) immediate)
    assert HaveEL(EL2) && ELUsingAArch32(EL2);

    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;

    exception = ExceptionSyndrome(Exception_HypervisorCall);
    exception.syndrome<15:0> = immediate;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    else
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
// AArch32.TakeSMCException()
// ==========================

AArch32.TakeSMCException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);
    AArch32.ITAdvance();
    SSAdvance();
    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);
// AArch32.TakeSVCException()
// ==========================

AArch32.TakeSVCException(bits(16) immediate)

    AArch32.ITAdvance();
    SSAdvance();
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = NextInstrAddr();
    vect_offset = 0x08;
    lr_offset = 0;

    if PSTATE.EL == EL2 || route_to_hyp then
        exception = ExceptionSyndrome(Exception_SupervisorCall);
        exception.syndrome<15:0> = immediate;
        if PSTATE.EL == EL2 then
            AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
        else
            AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Svc, preferred_exception_return, lr_offset, vect_offset);
// AArch32.EnterHypMode()
// ======================
// Take an exception to Hyp mode.

AArch32.EnterHypMode(ExceptionRecord exception, bits(32) preferred_exception_return,
                     integer vect_offset)
    SynchronizeContext();
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState);
    if !(exception.exceptype IN {Exception_IRQ, Exception_FIQ}) then
        AArch32.ReportHypEntry(exception);
    AArch32.WriteMode(M32_Hyp);
    SPSR[] = spsr;
    ELR_hyp = preferred_exception_return;
    PSTATE.T = HSCTLR.TE;    // PSTATE.J is RES0
    PSTATE.SS = '0';
    if !HaveEL(EL3) || SCR_GEN[].EA == '0' then PSTATE.A = '1';
    if !HaveEL(EL3) || SCR_GEN[].IRQ == '0' then PSTATE.I = '1';
    if !HaveEL(EL3) || SCR_GEN[].FIQ == '0' then PSTATE.F = '1';
    PSTATE.E = HSCTLR.EE;
    PSTATE.IL = '0';
    PSTATE.IT = '00000000';
    if HaveSSBSExt() then PSTATE.SSBS = HSCTLR.DSSBS;
    boolean branch_conditional = FALSE;
    BranchTo(HVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional);
    CheckExceptionCatch(TRUE);    // Check for debug event on exception entry
    EndOfInstruction();
// AArch32.EnterMode() // =================== // Take an exception to a mode other than Monitor and Hyp mode. AArch32.EnterMode(bits(5) target_mode, bits(32) preferred_exception_return, integer lr_offset, integer vect_offset) SynchronizeContext(); assert ELUsingAArch32(EL1) && PSTATE.EL != EL2; bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState); if PSTATE.M == M32_Monitor then SCR.NS = '0'; AArch32.WriteMode(target_mode); SPSR[] = spsr; R[14] = preferred_exception_return + lr_offset; PSTATE.T = SCTLR.TE; // PSTATE.J is RES0 PSTATE.SS = '0'; if target_mode == M32_FIQ then PSTATE.<A,I,F> = '111'; elsif target_mode IN {M32_Abort, M32_IRQ} then PSTATE.<A,I> = '11'; else PSTATE.I = '1'; PSTATE.E = SCTLR.EE; PSTATE.IL = '0'; PSTATE.IT = '00000000'; if HavePANExt() && SCTLR.SPAN == '0' then PSTATE.PAN = '1'; if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS; boolean branch_conditional = FALSE; BranchTo(ExcVectorBase()<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional); CheckExceptionCatch(TRUE); // Check for debug event on exception entry EndOfInstruction();
// AArch32.EnterMonitorMode() // ========================== // Take an exception to Monitor mode. AArch32.EnterMonitorMode(bits(32) preferred_exception_return, integer lr_offset, integer vect_offset) SynchronizeContext(); assert HaveEL(EL3) && ELUsingAArch32(EL3); from_secure = IsSecure(); bits(32) spsr = GetPSRFromPSTATE(AArch32_NonDebugState); if PSTATE.M == M32_Monitor then SCR.NS = '0'; AArch32.WriteMode(M32_Monitor); SPSR[] = spsr; R[14] = preferred_exception_return + lr_offset; PSTATE.T = SCTLR.TE; // PSTATE.J is RES0 PSTATE.SS = '0'; PSTATE.<A,I,F> = '111'; PSTATE.E = SCTLR.EE; PSTATE.IL = '0'; PSTATE.IT = '00000000'; if HavePANExt() then if !from_secure then PSTATE.PAN = '0'; elsif SCTLR.SPAN == '0' then PSTATE.PAN = '1'; if HaveSSBSExt() then PSTATE.SSBS = SCTLR.DSSBS; boolean branch_conditional = FALSE; BranchTo(MVBAR<31:5>:vect_offset<4:0>, BranchType_EXCEPTION, branch_conditional); CheckExceptionCatch(TRUE); // Check for debug event on exception entry EndOfInstruction();
// AArch32.CheckAdvSIMDOrFPEnabled()
// =================================
// Check against CPACR, FPEXC, HCPTR, NSACR, and CPTR_EL3.

AArch32.CheckAdvSIMDOrFPEnabled(boolean fpexc_check, boolean advsimd)
    if PSTATE.EL == EL0 && (!EL2Enabled() || (!ELUsingAArch32(EL2) && HCR_EL2.TGE == '0')) && !ELUsingAArch32(EL1) then
        // The PE behaves as if FPEXC.EN is 1
        AArch64.CheckFPEnabled();
        AArch64.CheckFPAdvSIMDEnabled();
    elsif PSTATE.EL == EL0 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' && !ELUsingAArch32(EL1) then
        if fpexc_check && HCR_EL2.RW == '0' then
            fpexc_en = bits(1) IMPLEMENTATION_DEFINED "FPEXC.EN value when TGE==1 and RW==0";
            if fpexc_en == '0' then UNDEFINED;
        AArch64.CheckFPEnabled();
    else
        cpacr_asedis = CPACR.ASEDIS;
        cpacr_cp10 = CPACR.cp10;

        if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
            // Check if access disabled in NSACR
            if NSACR.NSASEDIS == '1' then cpacr_asedis = '1';
            if NSACR.cp10 == '0' then cpacr_cp10 = '00';

        if PSTATE.EL != EL2 then
            // Check if Advanced SIMD disabled in CPACR
            if advsimd && cpacr_asedis == '1' then UNDEFINED;

            // Check if access disabled in CPACR
            case cpacr_cp10 of
                when '00' disabled = TRUE;
                when '01' disabled = PSTATE.EL == EL0;
                when '10' disabled = ConstrainUnpredictableBool(Unpredictable_RESCPACR);
                when '11' disabled = FALSE;
            if disabled then UNDEFINED;

        // If required, check FPEXC enabled bit.
        if fpexc_check && FPEXC.EN == '0' then UNDEFINED;

        AArch32.CheckFPAdvSIMDTrap(advsimd); // Also check against HCPTR and CPTR_EL3

// AArch32.CheckFPAdvSIMDTrap()
// ============================
// Check against CPTR_EL2 and CPTR_EL3.

AArch32.CheckFPAdvSIMDTrap(boolean advsimd)
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckFPAdvSIMDTrap();
    else
        if HaveEL(EL2) && !IsSecure() then
            hcptr_tase = HCPTR.TASE;
            hcptr_cp10 = HCPTR.TCP10;

            if HaveEL(EL3) && ELUsingAArch32(EL3) && !IsSecure() then
                // Check if access disabled in NSACR
                if NSACR.NSASEDIS == '1' then hcptr_tase = '1';
                if NSACR.cp10 == '0' then hcptr_cp10 = '1';

            // Check if access disabled in HCPTR
            if (advsimd && hcptr_tase == '1') || hcptr_cp10 == '1' then
                exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
                exception.syndrome<24:20> = ConditionSyndrome();
                if advsimd then
                    exception.syndrome<5> = '1';
                else
                    exception.syndrome<5> = '0';
                    exception.syndrome<3:0> = '1010'; // coproc field, always 0xA
                if PSTATE.EL == EL2 then
                    AArch32.TakeUndefInstrException(exception);
                else
                    AArch32.TakeHypTrapException(exception);

        if HaveEL(EL3) && !ELUsingAArch32(EL3) then
            // Check if access disabled in CPTR_EL3
            if CPTR_EL3.TFP == '1' then AArch64.AdvSIMDFPAccessTrap(EL3);
    return;
// AArch32.CheckForSMCUndefOrTrap()
// ================================
// Check for UNDEFINED or trap on SMC instruction

AArch32.CheckForSMCUndefOrTrap()
    if !HaveEL(EL3) || PSTATE.EL == EL0 then
        UNDEFINED;
    if EL2Enabled() && !ELUsingAArch32(EL2) then
        AArch64.CheckForSMCUndefOrTrap(Zeros(16));
    else
        route_to_hyp = EL2Enabled() && PSTATE.EL == EL1 && HCR.TSC == '1';
        if route_to_hyp then
            exception = ExceptionSyndrome(Exception_MonitorCall);
            AArch32.TakeHypTrapException(exception);

// AArch32.CheckForSVCTrap()
// =========================
// Check for trap on SVC instruction

AArch32.CheckForSVCTrap(bits(16) immediate)
    if HaveFGTExt() then
        route_to_el2 = FALSE;
        if PSTATE.EL == EL0 then
            route_to_el2 = (!ELUsingAArch32(EL1) && EL2Enabled() && HFGITR_EL2.SVC_EL0 == '1' &&
                            (HCR_EL2.<E2H, TGE> != '11' && (!HaveEL(EL3) || SCR_EL3.FGTEn == '1')));

        if route_to_el2 then
            exception = ExceptionSyndrome(Exception_SupervisorCall);
            exception.syndrome<15:0> = immediate;
            bits(64) preferred_exception_return = ThisInstrAddr();
            vect_offset = 0x0;

            AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);

// AArch32.CheckForWFxTrap()
// =========================
// Check for trap on WFE or WFI instruction

AArch32.CheckForWFxTrap(bits(2) target_el, WFxType wfxtype)
    assert HaveEL(target_el);

    // Check for routing to AArch64
    if !ELUsingAArch32(target_el) then
        AArch64.CheckForWFxTrap(target_el, wfxtype);
        return;
    boolean is_wfe = wfxtype == WFxType_WFE;
    case target_el of
        when EL1 trap = (if is_wfe then SCTLR.nTWE else SCTLR.nTWI) == '0';
        when EL2 trap = (if is_wfe then HCR.TWE else HCR.TWI) == '1';
        when EL3 trap = (if is_wfe then SCR.TWE else SCR.TWI) == '1';
    if trap then
        if target_el == EL1 && EL2Enabled() && !ELUsingAArch32(EL2) && HCR_EL2.TGE == '1' then
            AArch64.WFxTrap(wfxtype, target_el);
        if target_el == EL3 then
            AArch32.TakeMonitorTrapException();
        elsif target_el == EL2 then
            exception = ExceptionSyndrome(Exception_WFxTrap);
            exception.syndrome<24:20> = ConditionSyndrome();
            case wfxtype of
                when WFxType_WFI exception.syndrome<0> = '0';
                when WFxType_WFE exception.syndrome<0> = '1';
            AArch32.TakeHypTrapException(exception);
        else
            AArch32.TakeUndefInstrException();
// AArch32.CheckITEnabled()
// ========================
// Check whether the T32 IT instruction is disabled.

AArch32.CheckITEnabled(bits(4) mask)
    if PSTATE.EL == EL2 then
        it_disabled = HSCTLR.ITD;
    else
        it_disabled = (if ELUsingAArch32(EL1) then SCTLR.ITD else SCTLR[].ITD);
    if it_disabled == '1' then
        if mask != '1000' then UNDEFINED;

        // Otherwise whether the IT block is allowed depends on hw1 of the next instruction.
        next_instr = AArch32.MemSingle[NextInstrAddr(), 2, AccType_IFETCH, TRUE];

        if next_instr IN {'11xxxxxxxxxxxxxx', '1011xxxxxxxxxxxx', '10100xxxxxxxxxxx',
                          '01001xxxxxxxxxxx', '010001xxx1111xxx', '010001xx1xxxx111'} then
            // It is IMPLEMENTATION DEFINED whether the Undefined Instruction exception is
            // taken on the IT instruction or the next instruction. This is not reflected in
            // the pseudocode, which always takes the exception on the IT instruction. This
            // also does not take into account cases where the next instruction is UNPREDICTABLE.
            UNDEFINED;
    return;

// AArch32.CheckIllegalState()
// ===========================
// Check PSTATE.IL bit and generate Illegal Execution state exception if set.

AArch32.CheckIllegalState()
    if AArch32.GeneralExceptionsToAArch64() then
        AArch64.CheckIllegalState();
    elsif PSTATE.IL == '1' then
        route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

        bits(32) preferred_exception_return = ThisInstrAddr();
        vect_offset = 0x04;

        if PSTATE.EL == EL2 || route_to_hyp then
            exception = ExceptionSyndrome(Exception_IllegalState);
            if PSTATE.EL == EL2 then
                AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
            else
                AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
        else
            AArch32.TakeUndefInstrException();

// AArch32.CheckSETENDEnabled()
// ============================
// Check whether the AArch32 SETEND instruction is disabled.

AArch32.CheckSETENDEnabled()
    if PSTATE.EL == EL2 then
        setend_disabled = HSCTLR.SED;
    else
        setend_disabled = (if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED);
    if setend_disabled == '1' then
        UNDEFINED;
    return;

// AArch32.SystemAccessTrap()
// ==========================
// Trapped system register access.

AArch32.SystemAccessTrap(bits(5) mode, integer ec)
    (valid, target_el) = ELFromM32(mode);
    assert valid && HaveEL(target_el) && target_el != EL0 && UInt(target_el) >= UInt(PSTATE.EL);

    if target_el == EL2 then
        exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
        AArch32.TakeHypTrapException(exception);
    else
        AArch32.TakeUndefInstrException();
// AArch32.SystemAccessTrapSyndrome()
// ==================================
// Returns the syndrome information for traps on AArch32 MCR, MCRR, MRC, MRRC, and VMRS, VMSR instructions,
// other than traps that are due to HCPTR or CPACR.

ExceptionRecord AArch32.SystemAccessTrapSyndrome(bits(32) instr, integer ec)
    ExceptionRecord exception;

    case ec of
        when 0x0 exception = ExceptionSyndrome(Exception_Uncategorized);
        when 0x3 exception = ExceptionSyndrome(Exception_CP15RTTrap);
        when 0x4 exception = ExceptionSyndrome(Exception_CP15RRTTrap);
        when 0x5 exception = ExceptionSyndrome(Exception_CP14RTTrap);
        when 0x6 exception = ExceptionSyndrome(Exception_CP14DTTrap);
        when 0x7 exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
        when 0x8 exception = ExceptionSyndrome(Exception_FPIDTrap);
        when 0xC exception = ExceptionSyndrome(Exception_CP14RRTTrap);
        otherwise Unreachable();

    bits(20) iss = Zeros();

    if exception.exceptype == Exception_Uncategorized then
        return exception;
    elsif exception.exceptype IN {Exception_FPIDTrap, Exception_CP14RTTrap, Exception_CP15RTTrap} then
        // Trapped MRC/MCR, VMRS on FPSID
        iss<13:10> = instr<19:16>; // CRn, Reg in case of VMRS
        iss<8:5>   = instr<15:12>; // Rt
        iss<9>     = '0';          // RES0
        if exception.exceptype != Exception_FPIDTrap then // When trap is not for VMRS
            iss<19:17> = instr<7:5>;   // opc2
            iss<16:14> = instr<23:21>; // opc1
            iss<4:1>   = instr<3:0>;   // CRm
        else // VMRS access
            iss<19:17> = '000';  // opc2 - Hardcoded for VMRS
            iss<16:14> = '111';  // opc1 - Hardcoded for VMRS
            iss<4:1>   = '0000'; // CRm  - Hardcoded for VMRS
    elsif exception.exceptype IN {Exception_CP14RRTTrap, Exception_AdvSIMDFPAccessTrap, Exception_CP15RRTTrap} then
        // Trapped MRRC/MCRR, VMRS/VMSR
        iss<19:16> = instr<7:4>;   // opc1
        iss<13:10> = instr<19:16>; // Rt2
        iss<8:5>   = instr<15:12>; // Rt
        iss<4:1>   = instr<3:0>;   // CRm
    elsif exception.exceptype == Exception_CP14DTTrap then
        // Trapped LDC/STC
        iss<19:12> = instr<7:0>;  // imm8
        iss<4>     = instr<23>;   // U
        iss<2:1>   = instr<24,21>; // P,W
        if instr<19:16> == '1111' then // Rn==15, LDC (Literal addressing)/STC
            iss<8:5> = bits(4) UNKNOWN;
            iss<3> = '1';
    iss<0> = instr<20>; // Direction

    exception.syndrome<24:20> = ConditionSyndrome();
    exception.syndrome<19:0> = iss;

    return exception;
// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.

AArch32.TakeHypTrapException(integer ec)
    exception = AArch32.SystemAccessTrapSyndrome(ThisInstr(), ec);
    AArch32.TakeHypTrapException(exception);

// AArch32.TakeHypTrapException()
// ==============================
// Exceptions routed to Hyp mode as a Hyp Trap exception.

AArch32.TakeHypTrapException(ExceptionRecord exception)
    assert HaveEL(EL2) && !IsSecure() && ELUsingAArch32(EL2);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x14;

    AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);

// AArch32.TakeMonitorTrapException()
// ==================================
// Exceptions routed to Monitor mode as a Monitor Trap exception.

AArch32.TakeMonitorTrapException()
    assert HaveEL(EL3) && ELUsingAArch32(EL3);

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    AArch32.EnterMonitorMode(preferred_exception_return, lr_offset, vect_offset);

// AArch32.TakeUndefInstrException()
// =================================

AArch32.TakeUndefInstrException()
    exception = ExceptionSyndrome(Exception_Uncategorized);
    AArch32.TakeUndefInstrException(exception);

// AArch32.TakeUndefInstrException()
// =================================

AArch32.TakeUndefInstrException(ExceptionRecord exception)
    route_to_hyp = PSTATE.EL == EL0 && EL2Enabled() && HCR.TGE == '1';

    bits(32) preferred_exception_return = ThisInstrAddr();
    vect_offset = 0x04;
    lr_offset = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;

    if PSTATE.EL == EL2 then
        AArch32.EnterHypMode(exception, preferred_exception_return, vect_offset);
    elsif route_to_hyp then
        AArch32.EnterHypMode(exception, preferred_exception_return, 0x14);
    else
        AArch32.EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset);

// AArch32.UndefinedFault()
// ========================

AArch32.UndefinedFault()
    if AArch32.GeneralExceptionsToAArch64() then AArch64.UndefinedFault();

    AArch32.TakeUndefInstrException();
// AArch32.DomainValid()
// =====================
// Returns TRUE if the Domain is valid for a Short-descriptor translation scheme.

boolean AArch32.DomainValid(Fault statuscode, integer level)
    assert statuscode != Fault_None;

    case statuscode of
        when Fault_Domain
            return TRUE;
        when Fault_Translation, Fault_AccessFlag, Fault_SyncExternalOnWalk, Fault_SyncParityOnWalk
            return level == 2;
        otherwise
            return FALSE;

// AArch32.FaultStatusLD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Long-descriptor format.

bits(32) AArch32.FaultStatusLD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
            fsr<13> = '1';
            fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '1';
    fsr<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return fsr;

// AArch32.FaultStatusSD()
// =======================
// Creates an exception fault status value for Abort and Watchpoint exceptions taken
// to Abort mode using AArch32 and Short-descriptor format.

bits(32) AArch32.FaultStatusSD(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(32) fsr = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then fsr<15:14> = fault.errortype;
    if d_side then
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
            fsr<13> = '1';
            fsr<11> = '1';
        else
            fsr<11> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then fsr<12> = fault.extflag;
    fsr<9> = '0';
    fsr<10,3:0> = EncodeSDFSC(fault.statuscode, fault.level);
    if d_side then
        fsr<7:4> = fault.domain; // Domain field (data fault only)

    return fsr;

// AArch32.FaultSyndrome()
// =======================
// Creates an exception syndrome value for Abort and Watchpoint exceptions taken to
// AArch32 Hyp mode.

bits(25) AArch32.FaultSyndrome(boolean d_side, FaultRecord fault)
    assert fault.statuscode != Fault_None;

    bits(25) iss = Zeros();
    if HaveRASExt() && IsAsyncAbort(fault) then iss<11:10> = fault.errortype; // AET
    if d_side then
        if (IsSecondStage(fault) && !fault.s2fs1walk &&
              (!IsExternalSyncAbort(fault) ||
                (!HaveRASExt() && fault.acctype == AccType_TTW &&
                  boolean IMPLEMENTATION_DEFINED "ISV on second stage translation table walk"))) then
            iss<24:14> = LSInstructionSyndrome();
        if fault.acctype IN {AccType_DC, AccType_IC, AccType_AT, AccType_ATPAN} then
            iss<8> = '1';
            iss<6> = '1';
        else
            iss<6> = if fault.write then '1' else '0';
    if IsExternalAbort(fault) then iss<9> = fault.extflag;
    iss<7> = if fault.s2fs1walk then '1' else '0';
    iss<5:0> = EncodeLDFSC(fault.statuscode, fault.level);

    return iss;

// EncodeSDFSC()
// =============
// Function that gives the Short-descriptor FSR code for different types of Fault

bits(5) EncodeSDFSC(Fault statuscode, integer level)
    bits(5) result;
    case statuscode of
        when Fault_AccessFlag
            assert level IN {1,2};
            result = if level == 1 then '00011' else '00110';
        when Fault_Alignment
            result = '00001';
        when Fault_Permission
            assert level IN {1,2};
            result = if level == 1 then '01101' else '01111';
        when Fault_Domain
            assert level IN {1,2};
            result = if level == 1 then '01001' else '01011';
        when Fault_Translation
            assert level IN {1,2};
            result = if level == 1 then '00101' else '00111';
        when Fault_SyncExternal
            result = '01000';
        when Fault_SyncExternalOnWalk
            assert level IN {1,2};
            result = if level == 1 then '01100' else '01110';
        when Fault_SyncParity
            result = '11001';
        when Fault_SyncParityOnWalk
            assert level IN {1,2};
            result = if level == 1 then '11100' else '11110';
        when Fault_AsyncParity
            result = '11000';
        when Fault_AsyncExternal
            result = '10110';
        when Fault_Debug
            result = '00010';
        when Fault_TLBConflict
            result = '10000';
        when Fault_Lockdown
            result = '10100'; // IMPLEMENTATION DEFINED
        when Fault_Exclusive
            result = '10101'; // IMPLEMENTATION DEFINED
        when Fault_ICacheMaint
            result = '00100';
        otherwise
            Unreachable();
    return result;
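Because the mapping above is a fixed table, it can be transcribed directly. The following Python dictionary is an illustrative (non-normative) rendering of EncodeSDFSC, keyed on (fault, level) with level-independent faults keyed on None; the string names are shortened from the Fault_* enumerators:

    SDFSC = {
        ('AccessFlag', 1): 0b00011, ('AccessFlag', 2): 0b00110,
        ('Alignment', None): 0b00001,
        ('Permission', 1): 0b01101, ('Permission', 2): 0b01111,
        ('Domain', 1): 0b01001, ('Domain', 2): 0b01011,
        ('Translation', 1): 0b00101, ('Translation', 2): 0b00111,
        ('SyncExternal', None): 0b01000,
        ('SyncExternalOnWalk', 1): 0b01100, ('SyncExternalOnWalk', 2): 0b01110,
        ('SyncParity', None): 0b11001,
        ('SyncParityOnWalk', 1): 0b11100, ('SyncParityOnWalk', 2): 0b11110,
        ('AsyncParity', None): 0b11000, ('AsyncExternal', None): 0b10110,
        ('Debug', None): 0b00010, ('TLBConflict', None): 0b10000,
        ('Lockdown', None): 0b10100,   # IMPLEMENTATION DEFINED
        ('Exclusive', None): 0b10101,  # IMPLEMENTATION DEFINED
        ('ICacheMaint', None): 0b00100,
    }

    def encode_sdfsc(statuscode, level=None):
        # Level matters only for the faults that assert level IN {1,2} above.
        key = (statuscode, level) if (statuscode, level) in SDFSC else (statuscode, None)
        return SDFSC[key]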
// A32ExpandImm()
// ==============

bits(32) A32ExpandImm(bits(12) imm12)
    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = A32ExpandImm_C(imm12, PSTATE.C);

    return imm32;

// A32ExpandImm_C()
// ================

(bits(32), bit) A32ExpandImm_C(bits(12) imm12, bit carry_in)
    unrotated_value = ZeroExtend(imm12<7:0>, 32);
    (imm32, carry_out) = Shift_C(unrotated_value, SRType_ROR, 2*UInt(imm12<11:8>), carry_in);

    return (imm32, carry_out);
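Because the expansion is just a rotate-right of the bottom byte within 32 bits, it is easy to model concretely. Below is a minimal Python sketch of A32ExpandImm_C (illustrative, not the normative pseudocode); for a non-zero rotation the carry-out equals bit 31 of the result, matching ROR_C:

    def a32_expand_imm_c(imm12, carry_in):
        value = imm12 & 0xFF
        amount = 2 * ((imm12 >> 8) & 0xF)
        if amount == 0:
            return value, carry_in
        imm32 = ((value >> amount) | (value << (32 - amount))) & 0xFFFFFFFF
        return imm32, (imm32 >> 31) & 1

    # Example: imm12 = 0x4FF rotates 0xFF right by 8 -> 0xFF000000, carry_out = 1
    assert a32_expand_imm_c(0x4FF, 0) == (0xFF000000, 1)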
// DecodeImmShift()
// ================

(SRType, integer) DecodeImmShift(bits(2) srtype, bits(5) imm5)
    case srtype of
        when '00'
            shift_t = SRType_LSL;
            shift_n = UInt(imm5);
        when '01'
            shift_t = SRType_LSR;
            shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '10'
            shift_t = SRType_ASR;
            shift_n = if imm5 == '00000' then 32 else UInt(imm5);
        when '11'
            if imm5 == '00000' then
                shift_t = SRType_RRX;
                shift_n = 1;
            else
                shift_t = SRType_ROR;
                shift_n = UInt(imm5);

    return (shift_t, shift_n);

// DecodeRegShift()
// ================

SRType DecodeRegShift(bits(2) srtype)
    case srtype of
        when '00' shift_t = SRType_LSL;
        when '01' shift_t = SRType_LSR;
        when '10' shift_t = SRType_ASR;
        when '11' shift_t = SRType_ROR;
    return shift_t;

// RRX()
// =====

bits(N) RRX(bits(N) x, bit carry_in)
    (result, -) = RRX_C(x, carry_in);
    return result;

// RRX_C()
// =======

(bits(N), bit) RRX_C(bits(N) x, bit carry_in)
    result = carry_in : x<N-1:1>;
    carry_out = x<0>;
    return (result, carry_out);
enumeration SRType {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX};
// Shift()
// =======

bits(N) Shift(bits(N) value, SRType srtype, integer amount, bit carry_in)
    (result, -) = Shift_C(value, srtype, amount, carry_in);
    return result;

// Shift_C()
// =========

(bits(N), bit) Shift_C(bits(N) value, SRType srtype, integer amount, bit carry_in)
    assert !(srtype == SRType_RRX && amount != 1);

    if amount == 0 then
        (result, carry_out) = (value, carry_in);
    else
        case srtype of
            when SRType_LSL
                (result, carry_out) = LSL_C(value, amount);
            when SRType_LSR
                (result, carry_out) = LSR_C(value, amount);
            when SRType_ASR
                (result, carry_out) = ASR_C(value, amount);
            when SRType_ROR
                (result, carry_out) = ROR_C(value, amount);
            when SRType_RRX
                (result, carry_out) = RRX_C(value, carry_in);

    return (result, carry_out);
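Shift_C delegates to LSL_C/LSR_C/ASR_C/ROR_C/RRX_C, which are defined elsewhere in the shared pseudocode. As a self-contained illustration, the Python sketch below folds those helpers into one function specialised to 32-bit values; it is a model under that assumption, not the normative N-bit definition:

    def shift_c(value, shift_t, amount, carry_in):
        # 32-bit model of Shift_C; shift_t is one of 'LSL','LSR','ASR','ROR','RRX'.
        MASK32 = 0xFFFFFFFF
        if amount == 0:
            return value, carry_in
        if shift_t == 'LSL':
            ext = value << amount
            return ext & MASK32, (ext >> 32) & 1      # carry = last bit shifted out
        if shift_t == 'LSR':
            ext = value >> (amount - 1)
            return (ext >> 1) & MASK32, ext & 1
        if shift_t == 'ASR':
            signed = value - (1 << 32) if value & 0x80000000 else value
            ext = signed >> (amount - 1)
            return (ext >> 1) & MASK32, ext & 1
        if shift_t == 'ROR':
            amount %= 32                              # register rotates wrap modulo 32
            res = ((value >> amount) | (value << (32 - amount))) & MASK32
            return res, (res >> 31) & 1
        # RRX: carry enters at the top, bit 0 leaves as the new carry
        return ((carry_in << 31) | (value >> 1)) & 0xFFFFFFFF, value & 1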
// T32ExpandImm()
// ==============

bits(32) T32ExpandImm(bits(12) imm12)
    // PSTATE.C argument to following function call does not affect the imm32 result.
    (imm32, -) = T32ExpandImm_C(imm12, PSTATE.C);

    return imm32;

// T32ExpandImm_C()
// ================

(bits(32), bit) T32ExpandImm_C(bits(12) imm12, bit carry_in)
    if imm12<11:10> == '00' then
        case imm12<9:8> of
            when '00'
                imm32 = ZeroExtend(imm12<7:0>, 32);
            when '01'
                imm32 = '00000000' : imm12<7:0> : '00000000' : imm12<7:0>;
            when '10'
                imm32 = imm12<7:0> : '00000000' : imm12<7:0> : '00000000';
            when '11'
                imm32 = imm12<7:0> : imm12<7:0> : imm12<7:0> : imm12<7:0>;
        carry_out = carry_in;
    else
        unrotated_value = ZeroExtend('1':imm12<6:0>, 32);
        (imm32, carry_out) = ROR_C(unrotated_value, UInt(imm12<11:7>));

    return (imm32, carry_out);
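A Python model of the same decode (illustrative only): the top two bits of imm12 select between byte replication and a rotate of '1':imm12<6:0>, and only the rotate path updates the carry:

    def t32_expand_imm_c(imm12, carry_in):
        byte = imm12 & 0xFF
        if (imm12 >> 10) & 0b11 == 0:
            mode = (imm12 >> 8) & 0b11
            imm32 = (byte,                          # 00000000 00000000 00000000 abcdefgh
                     byte | (byte << 16),           # 00000000 abcdefgh 00000000 abcdefgh
                     (byte << 8) | (byte << 24),    # abcdefgh 00000000 abcdefgh 00000000
                     byte * 0x01010101)[mode]       # abcdefgh replicated in every byte
            return imm32, carry_in
        unrotated = 0x80 | (imm12 & 0x7F)           # '1':imm12<6:0>
        amount = (imm12 >> 7) & 0x1F                # always >= 8 on this path
        imm32 = ((unrotated >> amount) | (unrotated << (32 - amount))) & 0xFFFFFFFF
        return imm32, (imm32 >> 31) & 1             # ROR_C carry = bit 31 of result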
enumeration VCGEType {VCGEType_signed, VCGEType_unsigned, VCGEType_fp};
enumeration VFPNegMul {VFPNegMul_VNMLA, VFPNegMul_VNMLS, VFPNegMul_VNMUL};
// AArch32.CheckCP15InstrCoarseTraps()
// ===================================
// Check for coarse-grained traps to System registers in the
// coproc=0b1111 encoding space by HSTR and HCR.

AArch32.CheckCP15InstrCoarseTraps(integer CRn, integer nreg, integer CRm)
    if PSTATE.EL == EL0 && (!ELUsingAArch32(EL1) || (EL2Enabled() && !ELUsingAArch32(EL2))) then
        AArch64.CheckCP15InstrCoarseTraps(CRn, nreg, CRm);

    trapped_encoding = ((CRn == 9  && CRm IN {0,1,2, 5,6,7,8 }) ||
                        (CRn == 10 && CRm IN {0,1, 4, 8 })      ||
                        (CRn == 11 && CRm IN {0,1,2,3,4,5,6,7,8,15}));

    // Check for coarse-grained Hyp traps
    if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
        major = if nreg == 1 then CRn else CRm;
        // Check for MCR, MRC, MCRR, and MRRC disabled by HSTR<CRn/CRm>
        // and MRC and MCR disabled by HCR.TIDCP.
        if ((!(major IN {4,14}) && HSTR<major> == '1') ||
              (HCR.TIDCP == '1' && nreg == 1 && trapped_encoding)) then
            if (PSTATE.EL == EL0 &&
                  boolean IMPLEMENTATION_DEFINED "UNDEF unallocated CP15 access at EL0") then
                UNDEFINED;
            if ELUsingAArch32(EL2) then
                AArch32.SystemAccessTrap(M32_Hyp, 0x3);
            else
                AArch64.AArch32SystemAccessTrap(EL2, 0x3);
// AArch32.ExclusiveMonitorsPass()
// ===============================
// Return TRUE if the Exclusives monitors for the current PE include all of the addresses
// associated with the virtual address region of size bytes starting at address.
// The immediately following memory write must be to the same addresses.

boolean AArch32.ExclusiveMonitorsPass(bits(32) address, integer size)
    // It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens
    // before or after the check on the local Exclusives monitor. As a result a failure
    // of the local monitor can occur on some implementations even if the memory
    // access would give a memory abort.
    acctype = AccType_ATOMIC;
    iswrite = TRUE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    passed = AArch32.IsExclusiveVA(address, ProcessorID(), size);
    if !passed then
        return FALSE;
    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
    ClearExclusiveLocal(ProcessorID());
    if passed then
        if memaddrdesc.memattrs.shareability != Shareability_NSH then
            passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);
    return passed;
// An optional IMPLEMENTATION DEFINED test for an exclusive access to a virtual
// address region of size bytes starting at address.
//
// It is permitted (but not required) for this function to return FALSE and
// cause a store exclusive to fail if the virtual address region is not
// totally included within the region recorded by MarkExclusiveVA().
//
// It is always safe to return TRUE which will check the physical address only.

boolean AArch32.IsExclusiveVA(bits(32) address, integer processorid, integer size);

// Optionally record an exclusive access to the virtual address region of size bytes
// starting at address for processorid.

AArch32.MarkExclusiveVA(bits(32) address, integer processorid, integer size);

// AArch32.SetExclusiveMonitors()
// ==============================
// Sets the Exclusives monitors for the current PE to record the addresses associated
// with the virtual address region of size bytes starting at address.

AArch32.SetExclusiveMonitors(bits(32) address, integer size)
    acctype = AccType_ATOMIC;
    iswrite = FALSE;
    aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);

    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        return;

    if memaddrdesc.memattrs.shareability != Shareability_NSH then
        MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);

    MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);

    AArch32.MarkExclusiveVA(address, ProcessorID(), size);
// CheckAdvSIMDEnabled()
// =====================

CheckAdvSIMDEnabled()
    fpexc_check = TRUE;
    advsimd = TRUE;

    AArch32.CheckAdvSIMDOrFPEnabled(fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if Advanced SIMD access is permitted

    // Make temporary copy of D registers
    // _Dclone[] is used as input data for instruction pseudocode
    for i = 0 to 31
        _Dclone[i] = D[i];

    return;

// CheckAdvSIMDOrVFPEnabled()
// ==========================

CheckAdvSIMDOrVFPEnabled(boolean include_fpexc_check, boolean advsimd)
    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
    return;

// CheckCryptoEnabled32()
// ======================

CheckCryptoEnabled32()
    CheckAdvSIMDEnabled();
    // Return from CheckAdvSIMDEnabled() occurs only if access is permitted
    return;

// CheckVFPEnabled()
// =================

CheckVFPEnabled(boolean include_fpexc_check)
    advsimd = FALSE;
    AArch32.CheckAdvSIMDOrFPEnabled(include_fpexc_check, advsimd);
    // Return from CheckAdvSIMDOrFPEnabled() occurs only if VFP access is permitted
    return;
// FPHalvedSub()
// =============

bits(N) FPHalvedSub(bits(N) op1, bits(N) op2, FPCRType fpcr)
    assert N IN {16,32,64};
    rounding = FPRoundingMode(fpcr);
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        if inf1 && inf2 && sign1 == sign2 then
            result = FPDefaultNaN(fpcr);
            FPProcessException(FPExc_InvalidOp, fpcr);
        elsif (inf1 && sign1 == '0') || (inf2 && sign2 == '1') then
            result = FPInfinity('0');
        elsif (inf1 && sign1 == '1') || (inf2 && sign2 == '0') then
            result = FPInfinity('1');
        elsif zero1 && zero2 && sign1 != sign2 then
            result = FPZero(sign1);
        else
            result_value = (value1 - value2) / 2.0;
            if result_value == 0.0 then // Sign of exact zero result depends on rounding mode
                result_sign = if rounding == FPRounding_NEGINF then '1' else '0';
                result = FPZero(result_sign);
            else
                result = FPRound(result_value, fpcr);
    return result;

// FPRSqrtStep()
// =============

bits(N) FPRSqrtStep(bits(N) op1, bits(N) op2)
    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) three = FPThree('0');
        result = FPHalvedSub(three, product, fpcr);
    return result;

// FPRecipStep()
// =============

bits(N) FPRecipStep(bits(N) op1, bits(N) op2)
    assert N IN {16,32};
    FPCRType fpcr = StandardFPSCRValue();
    (type1,sign1,value1) = FPUnpack(op1, fpcr);
    (type2,sign2,value2) = FPUnpack(op2, fpcr);
    (done,result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
    if !done then
        inf1 = (type1 == FPType_Infinity); inf2 = (type2 == FPType_Infinity);
        zero1 = (type1 == FPType_Zero);    zero2 = (type2 == FPType_Zero);
        bits(N) product;
        if (inf1 && zero2) || (zero1 && inf2) then
            product = FPZero('0');
        else
            product = FPMul(op1, op2, fpcr);
        bits(N) two = FPTwo('0');
        result = FPSub(two, product, fpcr);
    return result;
// StandardFPSCRValue()
// ====================

FPCRType StandardFPSCRValue()
    bits(32) upper = '00000000000000000000000000000000';
    bits(32) lower = '00000' : FPSCR.AHP : '110000' : FPSCR.FZ16 : '0000000000000000000';
    return upper : lower;
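Reading the concatenation right to left: AHP lands at bit 26 of the lower word, the '110000' run forces DN (bit 25) and FZ (bit 24) to 1, and FZ16 lands at bit 19. A one-line Python sketch of the lower word (illustrative only):

    def standard_fpscr_value(ahp, fz16):
        # AHP at bit 26; DN (bit 25) and FZ (bit 24) forced to 1; FZ16 at bit 19.
        return (ahp << 26) | (0b11 << 24) | (fz16 << 19)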
// AArch32.CheckAlignment()
// ========================

boolean AArch32.CheckAlignment(bits(32) address, integer alignment, AccType acctype,
                               boolean iswrite)
    if PSTATE.EL == EL0 && !ELUsingAArch32(S1TranslationRegime()) then
        A = SCTLR[].A; // Use AArch64 register, when higher Exception level is using AArch64
    elsif PSTATE.EL == EL2 then
        A = HSCTLR.A;
    else
        A = SCTLR.A;
    aligned = (address == Align(address, alignment));
    atomic  = acctype IN { AccType_ATOMIC, AccType_ATOMICRW, AccType_ORDEREDATOMIC,
                           AccType_ORDEREDATOMICRW, AccType_ATOMICLS64, AccType_A32LSMD };
    ordered = acctype IN { AccType_ORDERED, AccType_ORDEREDRW, AccType_LIMITEDORDERED,
                           AccType_ORDEREDATOMIC, AccType_ORDEREDATOMICRW };
    vector  = acctype == AccType_VEC; // AccType_VEC is used for SIMD element alignment checks only
    check = (atomic || ordered || vector || A == '1');

    if check && !aligned then
        secondstage = FALSE;
        AArch32.Abort(address, AlignmentFault(acctype, iswrite, secondstage));

    return aligned;

// AArch32.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean aligned]
    boolean ispair = FALSE;
    return AArch32.MemSingle[address, size, acctype, aligned, ispair];

// AArch32.MemSingle[] - non-assignment (read) form
// ================================================
// Perform an atomic, little-endian read of 'size' bytes.

bits(size*8) AArch32.MemSingle[bits(32) address, integer size, AccType acctype,
                               boolean aligned, boolean ispair]
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    bits(size*8) value;
    iswrite = FALSE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    (memstatus, value) = PhysMemRead(memaddrdesc, size, accdesc);
    if IsFault(memstatus) then
        HandleExternalReadAbort(memstatus, memaddrdesc, size, accdesc);

    return value;

// AArch32.MemSingle[] - assignment (write) form
// =============================================

AArch32.MemSingle[bits(32) address, integer size, AccType acctype, boolean aligned] = bits(size*8) value
    boolean ispair = FALSE;
    AArch32.MemSingle[address, size, acctype, aligned, ispair] = value;
    return;

// AArch32.MemSingle[] - assignment (write) form
// =============================================
// Perform an atomic, little-endian write of 'size' bytes.

AArch32.MemSingle[bits(32) address, integer size, AccType acctype,
                  boolean aligned, boolean ispair] = bits(size*8) value
    assert size IN {1, 2, 4, 8, 16};
    assert address == Align(address, size);

    AddressDescriptor memaddrdesc;
    iswrite = TRUE;

    memaddrdesc = AArch32.TranslateAddress(address, acctype, iswrite, aligned, size);
    // Check for aborts or debug exceptions
    if IsFault(memaddrdesc) then
        AArch32.Abort(address, memaddrdesc.fault);

    // Effect on exclusives
    if memaddrdesc.memattrs.shareability != Shareability_NSH then
        ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);

    // Memory array access
    accdesc = CreateAccessDescriptor(acctype);
    memstatus = PhysMemWrite(memaddrdesc, size, accdesc, value);
    if IsFault(memstatus) then
        HandleExternalWriteAbort(memstatus, memaddrdesc, size, accdesc);

    return;
// MemA[] - non-assignment form
// ============================

bits(8*size) MemA[bits(32) address, integer size]
    acctype = AccType_ATOMIC;
    return Mem_with_type[address, size, acctype];

// MemA[] - assignment form
// ========================

MemA[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ATOMIC;
    Mem_with_type[address, size, acctype] = value;
    return;

// MemO[] - non-assignment form
// ============================

bits(8*size) MemO[bits(32) address, integer size]
    acctype = AccType_ORDERED;
    return Mem_with_type[address, size, acctype];

// MemO[] - assignment form
// ========================

MemO[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_ORDERED;
    Mem_with_type[address, size, acctype] = value;
    return;

// MemS[] - non-assignment form
// ============================
// Memory accessor for streaming load multiple instructions

bits(8*size) MemS[bits(32) address, integer size]
    acctype = AccType_A32LSMD;
    return Mem_with_type[address, size, acctype];

// MemS[] - assignment form
// ========================
// Memory accessor for streaming store multiple instructions

MemS[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_A32LSMD;
    Mem_with_type[address, size, acctype] = value;
    return;

// MemU[] - non-assignment form
// ============================

bits(8*size) MemU[bits(32) address, integer size]
    acctype = AccType_NORMAL;
    return Mem_with_type[address, size, acctype];

// MemU[] - assignment form
// ========================

MemU[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_NORMAL;
    Mem_with_type[address, size, acctype] = value;
    return;

// MemU_unpriv[] - non-assignment form
// ===================================

bits(8*size) MemU_unpriv[bits(32) address, integer size]
    acctype = AccType_UNPRIV;
    return Mem_with_type[address, size, acctype];

// MemU_unpriv[] - assignment form
// ===============================

MemU_unpriv[bits(32) address, integer size] = bits(8*size) value
    acctype = AccType_UNPRIV;
    Mem_with_type[address, size, acctype] = value;
    return;

// Mem_with_type[] - non-assignment (read) form
// ============================================
// Perform a read of 'size' bytes. The access byte order is reversed for a big-endian access.
// Instruction fetches would call AArch32.MemSingle directly.

bits(size*8) Mem_with_type[bits(32) address, integer size, AccType acctype]
    boolean ispair = FALSE;
    return Mem_with_type[address, size, acctype, ispair];

bits(size*8) Mem_with_type[bits(32) address, integer size, AccType acctype, boolean ispair]
    assert size IN {1, 2, 4, 8, 16};
    constant halfsize = size DIV 2;
    bits(size * 8) value;
    boolean iswrite = FALSE;

    if ispair then
        // check alignment on size of element accessed, not overall access size
        aligned = AArch32.CheckAlignment(address, halfsize, acctype, iswrite);
    else
        aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);
    if !aligned then
        assert size > 1;
        value<7:0> = AArch32.MemSingle[address, 1, acctype, aligned];

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            value<8*i+7:8*i> = AArch32.MemSingle[address+i, 1, acctype, aligned];
    else
        value = AArch32.MemSingle[address, size, acctype, aligned, ispair];

    if BigEndian(acctype) then
        value = BigEndianReverse(value);
    return value;

// Mem_with_type[] - assignment (write) form
// =========================================
// Perform a write of 'size' bytes. The byte order is reversed for a big-endian access.

Mem_with_type[bits(32) address, integer size, AccType acctype] = bits(size*8) value
    boolean ispair = FALSE;
    Mem_with_type[address, size, acctype, ispair] = value;

Mem_with_type[bits(32) address, integer size, AccType acctype, boolean ispair] = bits(size*8) value
    boolean iswrite = TRUE;
    constant halfsize = size DIV 2;

    if BigEndian(acctype) then
        value = BigEndianReverse(value);

    if ispair then
        // check alignment on size of element accessed, not overall access size
        aligned = AArch32.CheckAlignment(address, halfsize, acctype, iswrite);
    else
        aligned = AArch32.CheckAlignment(address, size, acctype, iswrite);

    if !aligned then
        assert size > 1;
        AArch32.MemSingle[address, 1, acctype, aligned] = value<7:0>;

        // For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory
        // access will generate an Alignment Fault, as to get this far means the first byte did
        // not, so we must be changing to a new translation page.
        c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
        assert c IN {Constraint_FAULT, Constraint_NONE};
        if c == Constraint_NONE then aligned = TRUE;

        for i = 1 to size-1
            AArch32.MemSingle[address+i, 1, acctype, aligned] = value<8*i+7:8*i>;
    else
        AArch32.MemSingle[address, size, acctype, aligned, ispair] = value;
    return;
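BigEndianReverse, used by both forms above, simply reverses the byte order of the access. A minimal Python equivalent, under the assumption that the access width in bytes is passed explicitly rather than inferred from the bit-vector width:

    def big_endian_reverse(value, size):
        # Reverse the byte order of an integer occupying 'size' bytes.
        return int.from_bytes(value.to_bytes(size, 'little'), 'big')

    assert big_endian_reverse(0x11223344, 4) == 0x44332211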
// AArch32.ESBOperation()
// ======================
// Perform the AArch32 ESB operation for ESB executed in AArch32 state

AArch32.ESBOperation()
    // Check if routed to AArch64 state
    route_to_aarch64 = PSTATE.EL == EL0 && !ELUsingAArch32(EL1);
    if !route_to_aarch64 && EL2Enabled() && !ELUsingAArch32(EL2) then
        route_to_aarch64 = HCR_EL2.TGE == '1' || HCR_EL2.AMO == '1';
    if !route_to_aarch64 && HaveEL(EL3) && !ELUsingAArch32(EL3) then
        route_to_aarch64 = SCR_EL3.EA == '1';

    if route_to_aarch64 then
        AArch64.ESBOperation();
        return;

    route_to_monitor = HaveEL(EL3) && ELUsingAArch32(EL3) && SCR.EA == '1';
    route_to_hyp = PSTATE.EL IN {EL0, EL1} && EL2Enabled() && (HCR.TGE == '1' || HCR.AMO == '1');

    if route_to_monitor then
        target = M32_Monitor;
    elsif route_to_hyp || PSTATE.M == M32_Hyp then
        target = M32_Hyp;
    else
        target = M32_Abort;

    if IsSecure() then
        mask_active = TRUE;
    elsif target == M32_Monitor then
        mask_active = SCR.AW == '1' && (!HaveEL(EL2) || (HCR.TGE == '0' && HCR.AMO == '0'));
    else
        mask_active = target == M32_Abort || PSTATE.M == M32_Hyp;

    mask_set = PSTATE.A == '1';
    (-, el) = ELFromM32(target);
    intdis = Halted() || ExternalDebugInterruptsDisabled(el);
    masked = intdis || (mask_active && mask_set);

    // Check for a masked Physical SError pending that can be synchronized
    // by an Error synchronization event.
    if masked && IsSynchronizablePhysicalSErrorPending() then
        syndrome32 = AArch32.PhysicalSErrorSyndrome();
        DISR = AArch32.ReportDeferredSError(syndrome32.AET, syndrome32.ExT);
        ClearPendingPhysicalSError();

    return;

// Return the SError syndrome

AArch32.SErrorSyndrome AArch32.PhysicalSErrorSyndrome();

// AArch32.ReportDeferredSError()
// ==============================
// Return deferred SError syndrome

bits(32) AArch32.ReportDeferredSError(bits(2) AET, bit ExT)
    bits(32) target;
    target<31> = '1'; // A
    syndrome = Zeros(16);

    if PSTATE.EL == EL2 then
        syndrome<11:10> = AET;       // AET
        syndrome<9>     = ExT;       // EA
        syndrome<5:0>   = '010001';  // DFSC
    else
        syndrome<15:14> = AET;       // AET
        syndrome<12>    = ExT;       // ExT
        syndrome<9>     = TTBCR.EAE; // LPAE
        if TTBCR.EAE == '1' then     // Long-descriptor format
            syndrome<5:0> = '010001';   // STATUS
        else                         // Short-descriptor format
            syndrome<10,3:0> = '10110'; // FS

    if HaveAArch64() then
        target<24:0> = ZeroExtend(syndrome); // Any RES0 fields must be set to zero
    else
        target<15:0> = syndrome;
    return target;
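As a worked example of the packing, the Python sketch below builds the short-descriptor variant of the syndrome (TTBCR.EAE == '0', not at EL2); the bit positions follow the pseudocode above and the helper is illustrative only:

    def report_deferred_serror_sd(aet, ext):
        target = 1 << 31                # A: an SError was deferred
        target |= (aet & 0b11) << 14    # AET at syndrome<15:14>
        target |= (ext & 1) << 12       # ExT at syndrome<12>
        # LPAE (bit 9) is 0; FS = '10110' splits across bits <10> and <3:0>
        fs = 0b10110
        target |= ((fs >> 4) & 1) << 10
        target |= fs & 0xF
        return target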
// AArch32.vESBOperation()
// =======================
// Perform the ESB operation for virtual SError interrupts executed in AArch32 state

AArch32.vESBOperation()
    assert PSTATE.EL IN {EL0, EL1} && EL2Enabled();

    // Check for EL2 using AArch64 state
    if !ELUsingAArch32(EL2) then
        AArch64.vESBOperation();
        return;

    // If physical SError interrupts are routed to Hyp mode, and TGE is not set, then a
    // virtual SError interrupt might be pending
    vSEI_enabled = HCR.TGE == '0' && HCR.AMO == '1';
    vSEI_pending = vSEI_enabled && HCR.VA == '1';
    vintdis      = Halted() || ExternalDebugInterruptsDisabled(EL1);
    vmasked      = vintdis || PSTATE.A == '1';

    // Check for a masked virtual SError pending
    if vSEI_pending && vmasked then
        VDISR = AArch32.ReportDeferredSError(VDFSR<15:14>, VDFSR<12>);
        HCR.VA = '0'; // Clear pending virtual SError

    return;

// AArch32.ResetGeneralRegisters()
// ===============================

AArch32.ResetGeneralRegisters()
    for i = 0 to 7
        R[i] = bits(32) UNKNOWN;
    for i = 8 to 12
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
    if HaveEL(EL2) then Rmode[13, M32_Hyp] = bits(32) UNKNOWN; // No R14_hyp
    for i = 13 to 14
        Rmode[i, M32_User] = bits(32) UNKNOWN;
        Rmode[i, M32_FIQ] = bits(32) UNKNOWN;
        Rmode[i, M32_IRQ] = bits(32) UNKNOWN;
        Rmode[i, M32_Svc] = bits(32) UNKNOWN;
        Rmode[i, M32_Abort] = bits(32) UNKNOWN;
        Rmode[i, M32_Undef] = bits(32) UNKNOWN;
        if HaveEL(EL3) then Rmode[i, M32_Monitor] = bits(32) UNKNOWN;
    return;

// AArch32.ResetSIMDFPRegisters()
// ==============================

AArch32.ResetSIMDFPRegisters()
    for i = 0 to 15
        Q[i] = bits(128) UNKNOWN;
    return;

// AArch32.ResetSpecialRegisters()
// ===============================

AArch32.ResetSpecialRegisters()
    // AArch32 special registers
    SPSR_fiq<31:0> = bits(32) UNKNOWN;
    SPSR_irq<31:0> = bits(32) UNKNOWN;
    SPSR_svc<31:0> = bits(32) UNKNOWN;
    SPSR_abt<31:0> = bits(32) UNKNOWN;
    SPSR_und<31:0> = bits(32) UNKNOWN;
    if HaveEL(EL2) then
        SPSR_hyp = bits(32) UNKNOWN;
        ELR_hyp = bits(32) UNKNOWN;
    if HaveEL(EL3) then
        SPSR_mon = bits(32) UNKNOWN;

    // External debug special registers
    DLR = bits(32) UNKNOWN;
    DSPSR = bits(32) UNKNOWN;

    return;
// ALUExceptionReturn()
// ====================

ALUExceptionReturn(bits(32) address)
    if PSTATE.EL == EL2 then
        UNDEFINED;
    elsif PSTATE.M IN {M32_User,M32_System} then
        Constraint c = ConstrainUnpredictable(Unpredictable_ALUEXCEPTIONRETURN);
        assert c IN {Constraint_UNDEF, Constraint_NOP};
        case c of
            when Constraint_UNDEF
                UNDEFINED;
            when Constraint_NOP
                EndOfInstruction();
    else
        AArch32.ExceptionReturn(address, SPSR[]);

// ALUWritePC()
// ============

ALUWritePC(bits(32) address)
    if CurrentInstrSet() == InstrSet_A32 then
        BXWritePC(address, BranchType_INDIR);
    else
        BranchWritePC(address, BranchType_INDIR);

// BXWritePC()
// ===========

BXWritePC(bits(32) address, BranchType branch_type)
    if address<0> == '1' then
        SelectInstrSet(InstrSet_T32);
        address<0> = '0';
    else
        SelectInstrSet(InstrSet_A32);
        // For branches to an unaligned PC counter in A32 state, the processor takes the branch
        // and does one of:
        // * Forces the address to be aligned
        // * Leaves the PC unaligned, meaning the target generates a PC Alignment fault.
        if address<1> == '1' && ConstrainUnpredictableBool(Unpredictable_A32FORCEALIGNPC) then
            address<1> = '0';
    boolean branch_conditional = AArch32.CurrentCond() != '111x';
    BranchTo(address, branch_type, branch_conditional);

// BranchWritePC()
// ===============

BranchWritePC(bits(32) address, BranchType branch_type)
    if CurrentInstrSet() == InstrSet_A32 then
        address<1:0> = '00';
    else
        address<0> = '0';
    boolean branch_conditional = AArch32.CurrentCond() != '111x';
    BranchTo(address, branch_type, branch_conditional);

// CBWritePC()
// ===========
// Takes a branch from a CBNZ/CBZ instruction.

CBWritePC(bits(32) address)
    assert CurrentInstrSet() == InstrSet_T32;
    address<0> = '0';
    boolean branch_conditional = TRUE;
    BranchTo(address, BranchType_DIR, branch_conditional);
// D[] - non-assignment form
// =========================

bits(64) D[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    return vreg<base+63:base>;

// D[] - assignment form
// =====================

D[integer n] = bits(64) value
    assert n >= 0 && n <= 31;
    base = (n MOD 2) * 64;
    bits(128) vreg = V[n DIV 2];
    vreg<base+63:base> = value;
    V[n DIV 2] = vreg;
    return;

// Din[] - non-assignment form
// ===========================

bits(64) Din[integer n]
    assert n >= 0 && n <= 31;
    return _Dclone[n];

// LR - assignment form
// ====================

LR = bits(32) value
    R[14] = value;
    return;

// LR - non-assignment form
// ========================

bits(32) LR
    return R[14];

// LoadWritePC()
// =============

LoadWritePC(bits(32) address)
    BXWritePC(address, BranchType_INDIR);

// LookUpRIndex()
// ==============

integer LookUpRIndex(integer n, bits(5) mode)
    assert n >= 0 && n <= 14;

    case n of // Select index by mode:     usr fiq irq svc abt und hyp
        when 8     result = RBankSelect(mode,  8, 24,  8,  8,  8,  8,  8);
        when 9     result = RBankSelect(mode,  9, 25,  9,  9,  9,  9,  9);
        when 10    result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10);
        when 11    result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11);
        when 12    result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12);
        when 13    result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15);
        when 14    result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14);
        otherwise  result = n;

    return result;

// PC - non-assignment form
// ========================

bits(32) PC
    return R[15]; // This includes the offset from AArch32 state

// PCStoreValue()
// ==============

bits(32) PCStoreValue()
    // This function returns the PC value. On architecture versions before Armv7, it
    // is permitted to instead return PC+4, provided it does so consistently. It is
    // used only to describe A32 instructions, so it returns the address of the current
    // instruction plus 8 (normally) or 12 (when the alternative is permitted).
    return PC;

// Q[] - non-assignment form
// =========================

bits(128) Q[integer n]
    assert n >= 0 && n <= 15;
    return V[n];

// Q[] - assignment form
// =====================

Q[integer n] = bits(128) value
    assert n >= 0 && n <= 15;
    V[n] = value;
    return;

// Qin[] - non-assignment form
// ===========================

bits(128) Qin[integer n]
    assert n >= 0 && n <= 15;
    return Din[2*n+1]:Din[2*n];

// R[] - assignment form
// =====================

R[integer n] = bits(32) value
    Rmode[n, PSTATE.M] = value;
    return;

// R[] - non-assignment form
// =========================

bits(32) R[integer n]
    if n == 15 then
        offset = (if CurrentInstrSet() == InstrSet_A32 then 8 else 4);
        return _PC<31:0> + offset;
    else
        return Rmode[n, PSTATE.M];
// RBankSelect()
// =============

integer RBankSelect(bits(5) mode, integer usr, integer fiq, integer irq,
                    integer svc, integer abt, integer und, integer hyp)
    case mode of
        when M32_User   result = usr; // User mode
        when M32_FIQ    result = fiq; // FIQ mode
        when M32_IRQ    result = irq; // IRQ mode
        when M32_Svc    result = svc; // Supervisor mode
        when M32_Abort  result = abt; // Abort mode
        when M32_Hyp    result = hyp; // Hyp mode
        when M32_Undef  result = und; // Undefined mode
        when M32_System result = usr; // System mode uses User mode registers
        otherwise Unreachable();      // Monitor mode

    return result;
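Taken together, LookUpRIndex and RBankSelect define a fixed mapping from (register number, mode) to an index in the flat register file. The Python sketch below reproduces that table (the mode names and the flat _R layout mirror the table above; Monitor mode is omitted because Rmode[] handles SP_mon and LR_mon separately):

    BANK = {  # mode -> (index of R13, index of R14)
        'usr': (13, 14), 'fiq': (29, 30), 'irq': (17, 16), 'svc': (19, 18),
        'abt': (21, 20), 'und': (23, 22), 'hyp': (15, 14), 'sys': (13, 14),
    }

    def lookup_r_index(n, mode):
        if 8 <= n <= 12 and mode == 'fiq':
            return n + 16        # R8_fiq..R12_fiq live at indices 24..28
        if n == 13:
            return BANK[mode][0]
        if n == 14:
            return BANK[mode][1]
        return n                 # R0-R7 (and R8-R12 outside FIQ) are unbanked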
// Rmode[] - non-assignment form
// =============================

bits(32) Rmode[integer n, bits(5) mode]
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then return SP_mon;
        elsif n == 14 then return LR_mon;
        else return _R[n]<31:0>;
    else
        return _R[LookUpRIndex(n, mode)]<31:0>;

// Rmode[] - assignment form
// =========================

Rmode[integer n, bits(5) mode] = bits(32) value
    assert n >= 0 && n <= 14;

    // Check for attempted use of Monitor mode in Non-secure state.
    if !IsSecure() then assert mode != M32_Monitor;
    assert !BadMode(mode);

    if mode == M32_Monitor then
        if n == 13 then SP_mon = value;
        elsif n == 14 then LR_mon = value;
        else _R[n]<31:0> = value;
    else
        // It is CONSTRAINED UNPREDICTABLE whether the upper 32 bits of the X
        // register are unchanged or set to zero. This is also tested for on
        // exception entry, as this applies to all AArch32 registers.
        if HaveAArch64() && ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
            _R[LookUpRIndex(n, mode)] = ZeroExtend(value);
        else
            _R[LookUpRIndex(n, mode)]<31:0> = value;
    return;

// S[] - non-assignment form
// =========================

bits(32) S[integer n]
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    return vreg<base+31:base>;

// S[] - assignment form
// =====================

S[integer n] = bits(32) value
    assert n >= 0 && n <= 31;
    base = (n MOD 4) * 32;
    bits(128) vreg = V[n DIV 4];
    vreg<base+31:base> = value;
    V[n DIV 4] = vreg;
    return;
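The S, D and Q accessors are three overlapping views of the same sixteen 128-bit V registers. The Python sketch below models that aliasing with the same index arithmetic (read-only accessors shown; illustrative only):

    class VRegs:
        def __init__(self):
            self.v = [0] * 16                       # sixteen 128-bit registers

        def q(self, n):                             # Q[n] = V[n]
            return self.v[n]

        def d(self, n):                             # D[n] = V[n DIV 2]<base+63:base>
            return (self.v[n // 2] >> (64 * (n % 2))) & (2**64 - 1)

        def s(self, n):                             # S[n] = V[n DIV 4]<base+31:base>
            return (self.v[n // 4] >> (32 * (n % 4))) & (2**32 - 1)

    r = VRegs()
    r.v[0] = 0x0123456789ABCDEF_FEDCBA9876543210
    assert r.d(0) == 0xFEDCBA9876543210 and r.s(1) == 0xFEDCBA98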
// SP - assignment form
// ====================

SP = bits(32) value
    R[13] = value;
    return;

// SP - non-assignment form
// ========================

bits(32) SP
    return R[13];

// AArch32.ExceptionReturn()
// =========================

AArch32.ExceptionReturn(bits(32) new_pc, bits(32) spsr)
    SynchronizeContext();

    // Attempts to change to an illegal mode or state will invoke the Illegal Execution state
    // mechanism
    SetPSTATEFromPSR(spsr);
    ClearExclusiveLocal(ProcessorID());
    SendEventLocal();

    if PSTATE.IL == '1' then
        // If the exception return is illegal, PC[1:0] are UNKNOWN
        new_pc<1:0> = bits(2) UNKNOWN;
    else
        // LR[1:0] or LR[0] are treated as being 0, depending on the target instruction set state
        if PSTATE.T == '1' then
            new_pc<0> = '0';    // T32
        else
            new_pc<1:0> = '00'; // A32

    boolean branch_conditional = AArch32.CurrentCond() != '111x';
    BranchTo(new_pc, BranchType_ERET, branch_conditional);

    CheckExceptionCatch(FALSE); // Check for debug event on exception return

// AArch32.ExecutingCP10or11Instr()
// ================================

boolean AArch32.ExecutingCP10or11Instr()
    instr = ThisInstr();
    instr_set = CurrentInstrSet();
    assert instr_set IN {InstrSet_A32, InstrSet_T32};

    if instr_set == InstrSet_A32 then
        return ((instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');
    else // InstrSet_T32
        return (instr<31:28> == '111x' && (instr<27:24> == '1110' || instr<27:25> == '110') && instr<11:8> == '101x');

// AArch32.ITAdvance()
// ===================

AArch32.ITAdvance()
    if PSTATE.IT<2:0> == '000' then
        PSTATE.IT = '00000000';
    else
        PSTATE.IT<4:0> = LSL(PSTATE.IT<4:0>, 1);
    return;
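The IT advance is a left shift of the five low bits of PSTATE.IT, with the whole field clearing once the continuation bits are exhausted. A Python model (illustrative):

    def it_advance(it):
        if it & 0b111 == 0:
            return 0                              # IT block finished
        return (it & 0xE0) | ((it << 1) & 0x1F)   # LSL of IT<4:0> by one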
// Read from a 32-bit AArch32 System register and return the register's contents.

bits(32) AArch32.SysRegRead(integer cp_num, bits(32) instr);

// Read from a 64-bit AArch32 System register and return the register's contents.

bits(64) AArch32.SysRegRead64(integer cp_num, bits(32) instr);

// AArch32.SysRegReadCanWriteAPSR()
// ================================
// Determines whether the AArch32 System register read instruction can write to APSR flags.

boolean AArch32.SysRegReadCanWriteAPSR(integer cp_num, bits(32) instr)
    assert UsingAArch32();
    assert (cp_num IN {14,15});
    assert cp_num == UInt(instr<11:8>);

    opc1 = UInt(instr<23:21>);
    opc2 = UInt(instr<7:5>);
    CRn  = UInt(instr<19:16>);
    CRm  = UInt(instr<3:0>);

    if cp_num == 14 && opc1 == 0 && CRn == 0 && CRm == 1 && opc2 == 0 then // DBGDSCRint
        return TRUE;

    return FALSE;

// Write to a 32-bit AArch32 System register.

AArch32.SysRegWrite(integer cp_num, bits(32) instr, bits(32) val);

// Write to a 64-bit AArch32 System register.

AArch32.SysRegWrite64(integer cp_num, bits(32) instr, bits(64) val);

// Read a value from a virtual address and write it to an AArch32 System register.

AArch32.SysRegWriteM(integer cp_num, bits(32) instr, bits(32) address);
// AArch32.WriteMode()
// ===================
// Function for dealing with writes to PSTATE.M from AArch32 state only.
// This ensures that PSTATE.EL and PSTATE.SP are always valid.

AArch32.WriteMode(bits(5) mode)
    (valid,el) = ELFromM32(mode);
    assert valid;
    PSTATE.M   = mode;
    PSTATE.EL  = el;
    PSTATE.nRW = '1';
    PSTATE.SP  = (if mode IN {M32_User,M32_System} then '0' else '1');
    return;
// AArch32.WriteModeByInstr()
// ==========================
// Function for dealing with writes to PSTATE.M from an AArch32 instruction, and ensuring that
// illegal state changes are correctly flagged in PSTATE.IL.

AArch32.WriteModeByInstr(bits(5) mode)
    (valid,el) = ELFromM32(mode);

    // 'valid' is set to FALSE if 'mode' is invalid for this implementation or the current value
    // of SCR.NS/SCR_EL3.NS. Additionally, it is illegal for an instruction to write 'mode' to
    // PSTATE.EL if it would result in any of:
    // * A change to a mode that would cause entry to a higher Exception level.
    if UInt(el) > UInt(PSTATE.EL) then
        valid = FALSE;

    // * A change to or from Hyp mode.
    if (PSTATE.M == M32_Hyp || mode == M32_Hyp) && PSTATE.M != mode then
        valid = FALSE;

    // * When EL2 is implemented, the value of HCR.TGE is '1', a change to a Non-secure EL1 mode.
    if PSTATE.M == M32_Monitor && HaveEL(EL2) && el == EL1 && SCR.NS == '1' && HCR.TGE == '1' then
        valid = FALSE;

    if !valid then
        PSTATE.IL = '1';
    else
        AArch32.WriteMode(mode);
// BadMode()
// =========

boolean BadMode(bits(5) mode)
    // Return TRUE if 'mode' encodes a mode that is not valid for this implementation
    case mode of
        when M32_Monitor
            valid = HaveAArch32EL(EL3);
        when M32_Hyp
            valid = HaveAArch32EL(EL2);
        when M32_FIQ, M32_IRQ, M32_Svc, M32_Abort, M32_Undef, M32_System
            // If EL3 is implemented and using AArch32, then these modes are EL3 modes in Secure
            // state, and EL1 modes in Non-secure state. If EL3 is not implemented or is using
            // AArch64, then these modes are EL1 modes.
            // Therefore it is sufficient to test whether this implementation supports EL1 using AArch32.
            valid = HaveAArch32EL(EL1);
        when M32_User
            valid = HaveAArch32EL(EL0);
        otherwise
            valid = FALSE; // Passed an illegal mode value
    return !valid;
// BankedRegisterAccessValid()
// ===========================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to registers
// other than the SPSRs that are invalid. This includes ELR_hyp accesses.

BankedRegisterAccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '000xx', '00100' // R8_usr to R12_usr
            if mode != M32_FIQ then UNPREDICTABLE;
        when '00101' // SP_usr
            if mode == M32_System then UNPREDICTABLE;
        when '00110' // LR_usr
            if mode IN {M32_Hyp,M32_System} then UNPREDICTABLE;
        when '010xx', '0110x', '01110' // R8_fiq to R12_fiq, SP_fiq, LR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '1000x' // LR_irq, SP_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '1001x' // LR_svc, SP_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '1010x' // LR_abt, SP_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '1011x' // LR_und, SP_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '1110x' // LR_mon, SP_mon
            if !HaveEL(EL3) || !IsSecure() || mode == M32_Monitor then UNPREDICTABLE;
        when '11110' // ELR_hyp, only from Monitor or Hyp mode
            if !HaveEL(EL2) || !(mode IN {M32_Monitor,M32_Hyp}) then UNPREDICTABLE;
        when '11111' // SP_hyp, only from Monitor mode
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;
    return;

// CPSRWriteByInstr()
// ==================
// Update PSTATE.<N,Z,C,V,Q,GE,E,A,I,F,M> from a CPSR value written by an MSR instruction.

CPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    privileged = PSTATE.EL != EL0; // PSTATE.<A,I,F,M> are not writable at EL0

    // Write PSTATE from 'value', ignoring bytes masked by 'bytemask'
    if bytemask<3> == '1' then
        PSTATE.<N,Z,C,V,Q> = value<31:27>;
        // Bits <26:24> are ignored

    if bytemask<2> == '1' then
        if HaveSSBSExt() then
            PSTATE.SSBS = value<23>;
        if privileged then
            PSTATE.PAN = value<22>;
        if HaveDITExt() then
            PSTATE.DIT = value<21>;
        // Bit <20> is RES0
        PSTATE.GE = value<19:16>;

    if bytemask<1> == '1' then
        // Bits <15:10> are RES0
        PSTATE.E = value<9>; // PSTATE.E is writable at EL0
        if privileged then
            PSTATE.A = value<8>;

    if bytemask<0> == '1' then
        if privileged then
            PSTATE.<I,F> = value<7:6>;
            // Bit <5> is RES0
            // AArch32.WriteModeByInstr() sets PSTATE.IL to 1 if this is an illegal mode change.
            AArch32.WriteModeByInstr(value<4:0>);

    return;
// ConditionPassed()
// =================

boolean ConditionPassed()
    return ConditionHolds(AArch32.CurrentCond());

// InITBlock()
// ===========

boolean InITBlock()
    if CurrentInstrSet() == InstrSet_T32 then
        return PSTATE.IT<3:0> != '0000';
    else
        return FALSE;

// LastInITBlock()
// ===============

boolean LastInITBlock()
    return (PSTATE.IT<3:0> == '1000');

// SPSRWriteByInstr()
// ==================

SPSRWriteByInstr(bits(32) value, bits(4) bytemask)
    bits(32) new_spsr = SPSR[];

    if bytemask<3> == '1' then
        new_spsr<31:24> = value<31:24>; // N,Z,C,V,Q flags, IT[1:0],J bits
    if bytemask<2> == '1' then
        new_spsr<23:16> = value<23:16>; // IL bit, GE[3:0] flags
    if bytemask<1> == '1' then
        new_spsr<15:8> = value<15:8>;   // IT[7:2] bits, E bit, A interrupt mask
    if bytemask<0> == '1' then
        new_spsr<7:0> = value<7:0>;     // I,F interrupt masks, T bit, Mode bits

    SPSR[] = new_spsr; // UNPREDICTABLE if User or System mode

    return;
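Unlike CPSRWriteByInstr, which updates individual PSTATE fields, the SPSR update really is a byte-masked copy, so it can be modelled directly. A Python sketch (illustrative):

    def spsr_write_by_instr(spsr, value, bytemask):
        # Each set bit i of the 4-bit mask transfers byte i of 'value'.
        for i in range(4):
            if (bytemask >> i) & 1:
                lane = 0xFF << (8 * i)
                spsr = (spsr & ~lane) | (value & lane)
        return spsr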
// SPSRaccessValid()
// =================
// Checks for MRS (Banked register) or MSR (Banked register) accesses to the SPSRs
// that are UNPREDICTABLE

SPSRaccessValid(bits(5) SYSm, bits(5) mode)
    case SYSm of
        when '01110' // SPSR_fiq
            if mode == M32_FIQ then UNPREDICTABLE;
        when '10000' // SPSR_irq
            if mode == M32_IRQ then UNPREDICTABLE;
        when '10010' // SPSR_svc
            if mode == M32_Svc then UNPREDICTABLE;
        when '10100' // SPSR_abt
            if mode == M32_Abort then UNPREDICTABLE;
        when '10110' // SPSR_und
            if mode == M32_Undef then UNPREDICTABLE;
        when '11100' // SPSR_mon
            if !HaveEL(EL3) || mode == M32_Monitor || !IsSecure() then UNPREDICTABLE;
        when '11110' // SPSR_hyp
            if !HaveEL(EL2) || mode != M32_Monitor then UNPREDICTABLE;
        otherwise
            UNPREDICTABLE;
    return;

// SelectInstrSet()
// ================

SelectInstrSet(InstrSet iset)
    assert CurrentInstrSet() IN {InstrSet_A32, InstrSet_T32};
    assert iset IN {InstrSet_A32, InstrSet_T32};

    PSTATE.T = if iset == InstrSet_A32 then '0' else '1';

    return;

// Sat()
// =====

bits(N) Sat(integer i, integer N, boolean unsigned)
    result = if unsigned then UnsignedSat(i, N) else SignedSat(i, N);
    return result;

// SignedSat()
// ===========

bits(N) SignedSat(integer i, integer N)
    (result, -) = SignedSatQ(i, N);
    return result;
// UnsignedSat() // ============= bits(N) UnsignedSat(integer i, integer N) (result, -) = UnsignedSatQ(i, N); return result;
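For clarity, a small C sketch of N-bit saturation matching SignedSat()/UnsignedSat() above, for N up to 32; unlike the SatQ variants, the saturation flag is dropped (names hypothetical):

#include <stdint.h>

static uint32_t unsigned_sat(int64_t i, unsigned n) {
    int64_t max = ((int64_t)1 << n) - 1;         /* e.g. 255 for n = 8  */
    if (i < 0)   return 0;
    if (i > max) return (uint32_t)max;
    return (uint32_t)i;
}

static uint32_t signed_sat(int64_t i, unsigned n) {
    int64_t max = ((int64_t)1 << (n - 1)) - 1;   /* e.g.  127 for n = 8 */
    int64_t min = -((int64_t)1 << (n - 1));      /* e.g. -128 for n = 8 */
    if (i > max) i = max;
    if (i < min) i = min;
    uint64_t mask = ((uint64_t)1 << n) - 1;
    return (uint32_t)((uint64_t)i & mask);       /* result as bits(N)   */
}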
// AArch32.IC() // ============ // Perform Instruction Cache Operation. AArch32.IC(CacheOpScope opscope) regval = bits(32) UNKNOWN; AArch32.IC(regval, opscope); // AArch32.IC() // ============ // Perform Instruction Cache Operation. AArch32.IC(bits(32) regval, CacheOpScope opscope) CacheRecord cache; AccType acctype = AccType_IC; cache.acctype = acctype; cache.cachetype = CacheType_Instruction; cache.cacheop = CacheOp_Invalidate; cache.opscope = opscope; cache.security = SecurityStateAtEL(PSTATE.EL); if opscope IN {CacheOpScope_ALLU, CacheOpScope_ALLUIS} then if opscope == CacheOpScope_ALLUIS || (opscope == CacheOpScope_ALLU && PSTATE.EL == EL1 && EL2Enabled() && HCR.FB == '1') then cache.shareability = Shareability_ISH; else cache.shareability = Shareability_NSH; cache.regval = ZeroExtend(regval); CACHE_OP(cache); else assert opscope == CacheOpScope_PoU; if EL2Enabled() then if PSTATE.EL IN {EL0, EL1} then cache.is_vmid_valid = TRUE; cache.vmid = VMID[]; else cache.is_vmid_valid = FALSE; else cache.is_vmid_valid = FALSE; if PSTATE.EL == EL0 then cache.is_asid_valid = TRUE; cache.asid = ASID[]; else cache.is_asid_valid = FALSE; need_translate = ICInstNeedsTranslation(opscope); cache.shareability = Shareability_NSH; cache.vaddress = ZeroExtend(regval); cache.translated = need_translate; if !need_translate then cache.paddress = FullAddress UNKNOWN; CACHE_OP(cache); return; wasaligned = TRUE; iswrite = FALSE; size = 0; memaddrdesc = AArch32.TranslateAddress(regval, acctype, iswrite, wasaligned, size); if IsFault(memaddrdesc) then AArch32.Abort(regval, memaddrdesc.fault); cache.paddress = memaddrdesc.paddress; CACHE_OP(cache); return;
// AArch32.RestrictPrediction() // ============================ // Clear all predictions in the context. AArch32.RestrictPrediction(bits(32) val, RestrictType restriction) ExecutionCntxt c; target_el = val<25:24>; // If the instruction is executed at an EL lower than the specified // level, it is treated as a NOP. if UInt(target_el) > UInt(PSTATE.EL) then return; bit ns = val<26>; bit nse = bit UNKNOWN; ss = TargetSecurityState(ns, nse); c.security = ss; c.target_el = target_el; if EL2Enabled() then if PSTATE.EL IN {EL0, EL1} then c.is_vmid_valid = TRUE; c.all_vmid = FALSE; c.vmid = VMID[]; elsif target_el IN {EL0, EL1} then c.is_vmid_valid = TRUE; c.all_vmid = val<27> == '1'; c.vmid = ZeroExtend(val<23:16>, 16); // Only valid if val<27> == '0' else c.is_vmid_valid = FALSE; else c.is_vmid_valid = FALSE; if PSTATE.EL == EL0 then c.is_asid_valid = TRUE; c.all_asid = FALSE; c.asid = ASID[]; elsif target_el == EL0 then c.is_asid_valid = TRUE; c.all_asid = val<8> == '1'; c.asid = ZeroExtend(val<7:0>, 16); // Only valid if val<8> == '0' else c.is_asid_valid = FALSE; c.restriction = restriction; RESTRICT_PREDICTIONS(c);
// AArch32.DefaultTEXDecode() // ========================== // Apply short-descriptor format memory region attributes, without TEX remap MemoryAttributes AArch32.DefaultTEXDecode(bits(3) TEX, bit C, bit B, bit S) MemoryAttributes memattrs; // Reserved values map to allocated values if (TEX == '001' && C:B == '01') || (TEX == '010' && C:B != '00') || TEX == '011' then bits(5) texcb; (-, texcb) = ConstrainUnpredictableBits(Unpredictable_RESTEXCB); TEX = texcb<4:2>; C = texcb<1>; B = texcb<0>; // Distinction between Inner Shareable and Outer Shareable is not supported in this format // A memory region is either Non-shareable or Outer Shareable case TEX:C:B of when '00000' // Device-nGnRnE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; when '00001', '01000' // Device-nGnRE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRE; memattrs.shareability = Shareability_OSH; when '00010' // Write-through Read allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WT; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WT; memattrs.outer.hints = MemHint_RA; memattrs.shareability = if S == '1' then Shareability_OSH else Shareability_NSH; when '00011' // Write-back Read allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RA; memattrs.shareability = if S == '1' then Shareability_OSH else Shareability_NSH; when '00100' // Non-cacheable memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_NC; memattrs.outer.attrs = MemAttr_NC; memattrs.shareability = Shareability_OSH; when '00110' memattrs = MemoryAttributes IMPLEMENTATION_DEFINED; when '00111' // Write-back Read and Write allocate memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RWA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RWA; memattrs.shareability = if S == '1' then Shareability_OSH else Shareability_NSH; when '1xxxx' // Cacheable, TEX<1:0> = Outer attrs, {C,B} = Inner attrs memattrs.memtype = MemType_Normal; memattrs.inner = DecodeSDFAttr(C:B); memattrs.outer = DecodeSDFAttr(TEX<1:0>); if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then memattrs.shareability = Shareability_OSH; else memattrs.shareability = if S == '1' then Shareability_OSH else Shareability_NSH; otherwise // Reserved, handled above Unreachable(); // The Transient hint is not supported in this format memattrs.inner.transient = FALSE; memattrs.outer.transient = FALSE; memattrs.tagged = FALSE; if memattrs.inner.attrs == MemAttr_WB && memattrs.outer.attrs == MemAttr_WB then memattrs.xs = '0'; else memattrs.xs = '1'; return memattrs;
// AArch32.RemappedTEXDecode() // =========================== // Apply short-descriptor format memory region attributes, with TEX remap MemoryAttributes AArch32.RemappedTEXDecode(Regime regime, bits(3) TEX, bit C, bit B, bit S) MemoryAttributes memattrs; region = UInt(TEX<0>:C:B); // TEX<2:1> are ignored in this mapping scheme if region == 6 then return MemoryAttributes IMPLEMENTATION_DEFINED; if regime == Regime_EL30 then prrr = PRRR_S; nmrr = NMRR_S; elsif HaveAArch32EL(EL3) then prrr = PRRR_NS; nmrr = NMRR_NS; else prrr = PRRR; nmrr = NMRR; base = 2 * region; attrfield = prrr<base+1:base>; if attrfield == '11' then // Reserved, maps to allocated value (-, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESPRRR); case attrfield of when '00' // Device-nGnRnE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; when '01' // Device-nGnRE memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRE; memattrs.shareability = Shareability_OSH; when '10' NSn = if S == '0' then prrr.NS0 else prrr.NS1; NOSm = prrr<region+24> AND NSn; IRn = nmrr<base+1:base>; ORn = nmrr<base+17:base+16>; memattrs.memtype = MemType_Normal; memattrs.inner = DecodeSDFAttr(IRn); memattrs.outer = DecodeSDFAttr(ORn); if memattrs.inner.attrs == MemAttr_NC && memattrs.outer.attrs == MemAttr_NC then memattrs.shareability = Shareability_OSH; else bits(2) sh = NSn:NOSm; memattrs.shareability = DecodeShareability(sh); when '11' Unreachable(); // The Transient hint is not supported in this format memattrs.inner.transient = FALSE; memattrs.outer.transient = FALSE; memattrs.tagged = FALSE; if memattrs.inner.attrs == MemAttr_WB && memattrs.outer.attrs == MemAttr_WB then memattrs.xs = '0'; else memattrs.xs = '1'; return memattrs;
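As a worked illustration of the field arithmetic above: region = UInt(TEX<0>:C:B) selects a 2-bit type field at PRRR<2r+1:2r>, inner and outer cacheability at NMRR<2r+1:2r> and NMRR<2r+17:2r+16>, and the NOS bit at PRRR<r+24>. A C sketch, assuming plain uint32_t images of PRRR and NMRR (struct and function names hypothetical):

#include <stdint.h>

struct tex_remap {
    unsigned attr;  /* PRRR<2r+1:2r>     : region type         */
    unsigned irn;   /* NMRR<2r+1:2r>     : inner cacheability  */
    unsigned orn;   /* NMRR<2r+17:2r+16> : outer cacheability  */
    unsigned nos;   /* PRRR<r+24>        : Not Outer Shareable */
};

static struct tex_remap tex_remap_fields(uint32_t prrr, uint32_t nmrr,
                                         unsigned tex0, unsigned c, unsigned b) {
    unsigned region = (tex0 << 2) | (c << 1) | b;   /* UInt(TEX<0>:C:B), 0..7 */
    unsigned base   = 2 * region;
    struct tex_remap f;
    f.attr = (prrr >> base) & 0x3;
    f.irn  = (nmrr >> base) & 0x3;
    f.orn  = (nmrr >> (base + 16)) & 0x3;
    f.nos  = (prrr >> (region + 24)) & 0x1;
    return f;
}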
// AArch32.CheckBreakpoint() // ========================= // Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32 // translation regime, when either debug exceptions are enabled, or halting debug is enabled // and halting is allowed. FaultRecord AArch32.CheckBreakpoint(bits(32) vaddress, integer size) assert ELUsingAArch32(S1TranslationRegime()); assert size IN {2,4}; match = FALSE; mismatch = FALSE; for i = 0 to NumBreakpointsImplemented() - 1 (match_i, mismatch_i) = AArch32.BreakpointMatch(i, vaddress, size); match = match || match_i; mismatch = mismatch || mismatch_i; if match && HaltOnBreakpointOrWatchpoint() then reason = DebugHalt_Breakpoint; Halt(reason); elsif (match || mismatch) then acctype = AccType_IFETCH; iswrite = FALSE; debugmoe = DebugException_Breakpoint; return AArch32.DebugFault(acctype, iswrite, debugmoe); else return NoFault();
// AArch32.CheckDebug() // ==================== // Called on each access to check for a debug exception or entry to Debug state. FaultRecord AArch32.CheckDebug(bits(32) vaddress, AccType acctype, boolean iswrite, integer size) FaultRecord fault = NoFault(); d_side = (acctype != AccType_IFETCH); generate_exception = AArch32.GenerateDebugExceptions() && DBGDSCRext.MDBGen == '1'; halt = HaltOnBreakpointOrWatchpoint(); // Relative priority of Vector Catch and Breakpoint exceptions not defined in the architecture vector_catch_first = ConstrainUnpredictableBool(Unpredictable_BPVECTORCATCHPRI); if !d_side && vector_catch_first && generate_exception then fault = AArch32.CheckVectorCatch(vaddress, size); if fault.statuscode == Fault_None && (generate_exception || halt) then if d_side then fault = AArch32.CheckWatchpoint(vaddress, acctype, iswrite, size); else fault = AArch32.CheckBreakpoint(vaddress, size); if fault.statuscode == Fault_None && !d_side && !vector_catch_first && generate_exception then return AArch32.CheckVectorCatch(vaddress, size); return fault;
// AArch32.CheckVectorCatch() // ========================== // Called before executing the instruction of length "size" bytes at "vaddress" in an AArch32 // translation regime, when debug exceptions are enabled. FaultRecord AArch32.CheckVectorCatch(bits(32) vaddress, integer size) assert ELUsingAArch32(S1TranslationRegime()); match = AArch32.VCRMatch(vaddress); if size == 4 && !match && AArch32.VCRMatch(vaddress + 2) then match = ConstrainUnpredictableBool(Unpredictable_VCMATCHHALF); if match then acctype = AccType_IFETCH; iswrite = FALSE; debugmoe = DebugException_VectorCatch; return AArch32.DebugFault(acctype, iswrite, debugmoe); else return NoFault();
// AArch32.CheckWatchpoint() // ========================= // Called before accessing the memory location of "size" bytes at "address", // when either debug exceptions are enabled for the access, or halting debug // is enabled and halting is allowed. FaultRecord AArch32.CheckWatchpoint(bits(32) vaddress, AccType acctype, boolean iswrite, integer size) assert ELUsingAArch32(S1TranslationRegime()); if acctype IN {AccType_TTW, AccType_IC, AccType_AT, AccType_ATPAN} then return NoFault(); if acctype == AccType_DC then if !iswrite then return NoFault(); elsif !(boolean IMPLEMENTATION_DEFINED "DCIMVAC generates watchpoint") then return NoFault(); match = FALSE; ispriv = AArch32.AccessUsesEL(acctype) != EL0; for i = 0 to NumWatchpointsImplemented() - 1 if AArch32.WatchpointMatch(i, vaddress, size, ispriv, acctype, iswrite) then match = TRUE; if match && HaltOnBreakpointOrWatchpoint() then reason = DebugHalt_Watchpoint; EDWAR = ZeroExtend(vaddress); Halt(reason); elsif match then debugmoe = DebugException_Watchpoint; return AArch32.DebugFault(acctype, iswrite, debugmoe); else return NoFault();
// AArch32.DebugFault() // ==================== // Return a fault record indicating a hardware watchpoint/breakpoint FaultRecord AArch32.DebugFault(AccType acctype, boolean iswrite, bits(4) debugmoe) FaultRecord fault; fault.statuscode = Fault_Debug; fault.acctype = acctype; fault.write = iswrite; fault.debugmoe = debugmoe; fault.secondstage = FALSE; fault.s2fs1walk = FALSE; return fault;
// AArch32.IPAIsOutOfRange() // ========================= // Check intermediate physical address bits not resolved by translation are ZERO boolean AArch32.IPAIsOutOfRange(S2TTWParams walkparams, bits(40) ipa) // Input Address size iasize = AArch32.S2IASize(walkparams.t0sz); return iasize < 40 && !IsZero(ipa<39:iasize>);
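A minimal C sketch of this range check, assuming the 40-bit IPA is carried in a uint64_t (names hypothetical): every bit at or above position iasize must be zero.

#include <stdbool.h>
#include <stdint.h>

static bool ipa_out_of_range(uint64_t ipa, unsigned iasize) {
    if (iasize >= 40) return false;   /* all 40 IPA bits resolved    */
    return (ipa >> iasize) != 0;      /* ipa<39:iasize> must be zero */
}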
// AArch32.S1HasAlignmentFault() // ============================= // Returns whether stage 1 output fails alignment requirement on data accesses // to Device memory boolean AArch32.S1HasAlignmentFault(AccType acctype, boolean aligned, bit ntlsmd, MemoryAttributes memattrs) if acctype == AccType_IFETCH || memattrs.memtype != MemType_Device then return FALSE; if acctype == AccType_A32LSMD && ntlsmd == '0' && memattrs.device != DeviceType_GRE then return TRUE; return !aligned || acctype == AccType_DCZVA;
// AArch32.S1LDHasPermissionsFault() // ================================= // Returns whether an access using stage 1 long-descriptor translation // violates permissions of target memory boolean AArch32.S1LDHasPermissionsFault(Regime regime, SecurityState ss, S1TTWParams walkparams, Permissions perms, MemType memtype, PASpace paspace, boolean ispriv, AccType acctype, boolean iswrite) if HasUnprivileged(regime) then // Apply leaf permissions case perms.ap<2:1> of when '00' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '01' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL when '10' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '11' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL // Apply hierarchical permissions case perms.ap_table of when '00' (pr,pw,ur,uw) = ( pr, pw, ur, uw); // No effect when '01' (pr,pw,ur,uw) = ( pr, pw,'0','0'); // Privileged access when '10' (pr,pw,ur,uw) = ( pr,'0', ur,'0'); // Read-only when '11' (pr,pw,ur,uw) = ( pr,'0','0','0'); // Read-only, privileged access xn = perms.xn OR perms.xn_table; pxn = perms.pxn OR perms.pxn_table; ux = ur AND NOT(xn OR (uw AND walkparams.wxn)); px = pr AND NOT(xn OR pxn OR (pw AND walkparams.wxn) OR (uw AND walkparams.uwxn)); pan_access = !(acctype IN {AccType_DC, AccType_IFETCH, AccType_AT}); if HavePANExt() && pan_access then pan = PSTATE.PAN AND (ur OR uw); pr = pr AND NOT(pan); pw = pw AND NOT(pan); (r,w,x) = if ispriv then (pr,pw,px) else (ur,uw,ux); // Prevent execution from Non-secure space by PE in Secure state if SIF is set if ss == SS_Secure && paspace == PAS_NonSecure then x = x AND NOT(walkparams.sif); else // Apply leaf permissions case perms.ap<2> of when '0' (r,w) = ('1','1'); // No effect when '1' (r,w) = ('1','0'); // Read-only // Apply hierarchical permissions case perms.ap_table<1> of when '0' (r,w) = ( r , w ); // No effect when '1' (r,w) = ( r ,'0'); // Read-only xn = perms.xn OR perms.xn_table; x = NOT(xn OR (w AND walkparams.wxn)); if acctype == AccType_IFETCH then constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); if constraint == Constraint_FAULT && memtype == MemType_Device then return TRUE; else return x == '0'; elsif acctype IN {AccType_IC, AccType_DC} then return FALSE; elsif iswrite then return w == '0'; else return r == '0';
// AArch32.S1SDHasPermissionsFault() // ================================= // Returns whether an access using stage 1 short-descriptor translation // violates permissions of target memory boolean AArch32.S1SDHasPermissionsFault(Regime regime, SecurityState ss, Permissions perms, MemType memtype, PASpace paspace, boolean ispriv, AccType acctype, boolean iswrite) if regime == Regime_EL30 then sctlr = SCTLR_S; elsif HaveAArch32EL(EL3) then sctlr = SCTLR_NS; else sctlr = SCTLR; if sctlr.AFE == '0' then // Map Reserved encoding '100' if perms.ap == '100' then perms.ap = bits(3) IMPLEMENTATION_DEFINED "Reserved short descriptor AP encoding"; case perms.ap of when '000' (pr,pw,ur,uw) = ('0','0','0','0'); // No access when '001' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '010' (pr,pw,ur,uw) = ('1','1','1','0'); // R/W at PL1, RO at PL0 when '011' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL // '100' is reserved when '101' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '110' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL (deprecated) when '111' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL else // Simplified access permissions model case perms.ap<2:1> of when '00' (pr,pw,ur,uw) = ('1','1','0','0'); // R/W at PL1 only when '01' (pr,pw,ur,uw) = ('1','1','1','1'); // R/W at any PL when '10' (pr,pw,ur,uw) = ('1','0','0','0'); // RO at PL1 only when '11' (pr,pw,ur,uw) = ('1','0','1','0'); // RO at any PL ux = ur AND NOT(perms.xn OR (uw AND sctlr.WXN)); px = pr AND NOT(perms.xn OR perms.pxn OR (pw AND sctlr.WXN) OR (uw AND sctlr.UWXN)); pan_access = !(acctype IN {AccType_DC, AccType_IFETCH, AccType_AT}); if HavePANExt() && pan_access then pan = PSTATE.PAN AND (ur OR uw); pr = pr AND NOT(pan); pw = pw AND NOT(pan); (r,w,x) = if ispriv then (pr,pw,px) else (ur,uw,ux); // Prevent execution from Non-secure space by PE in Secure state if SIF is set if ss == SS_Secure && paspace == PAS_NonSecure then x = x AND NOT(if ELUsingAArch32(EL3) then SCR.SIF else SCR_EL3.SIF); if acctype == AccType_IFETCH then constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); if constraint == Constraint_FAULT && memtype == MemType_Device then return TRUE; else return x == '0'; elsif acctype IN {AccType_IC, AccType_DC} then return FALSE; elsif iswrite then return w == '0'; else return r == '0';
// AArch32.S2HasAlignmentFault() // ============================= // Returns whether stage 2 output fails alignment requirement on data accesses // to Device memory boolean AArch32.S2HasAlignmentFault(AccType acctype, boolean aligned, MemoryAttributes memattrs) if acctype == AccType_IFETCH || memattrs.memtype != MemType_Device then return FALSE; return !aligned || acctype == AccType_DCZVA;
// AArch32.S2HasPermissionsFault() // =============================== // Returns whether stage 2 access violates permissions of target memory boolean AArch32.S2HasPermissionsFault(boolean s2fs1walk, S2TTWParams walkparams, Permissions perms, MemType memtype, boolean ispriv, AccType acctype, boolean iswrite) r = perms.s2ap<0>; w = perms.s2ap<1>; if HaveExtendedExecuteNeverExt() then case perms.s2xn:perms.s2xnx of when '00' (px, ux) = ( r , r ); when '01' (px, ux) = ('0', r ); when '10' (px, ux) = ('0','0'); when '11' (px, ux) = ( r ,'0'); x = if ispriv then px else ux; else x = r AND NOT(perms.s2xn); if s2fs1walk && walkparams.ptw == '1' && memtype == MemType_Device then return TRUE; elsif acctype == AccType_IFETCH then constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE); if constraint == Constraint_FAULT && memtype == MemType_Device then return TRUE; else return x == '0'; elsif acctype IN {AccType_IC, AccType_DC} then return FALSE; elsif iswrite then return w == '0'; else return r == '0';
// AArch32.S2InconsistentSL() // ========================== // Detect inconsistent configuration of stage 2 T0SZ and SL fields boolean AArch32.S2InconsistentSL(S2TTWParams walkparams) startlevel = AArch32.S2StartLevel(walkparams.sl0); levels = FINAL_LEVEL - startlevel; granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; // Input address size must at least be large enough to be resolved from the start level sl_min_iasize = ( levels * stride // Bits resolved by table walk, except initial level + granulebits // Bits directly mapped to output address + 1); // At least 1 more bit to be decoded by initial level // Can accommodate 1 more stride in the level + concatenation of up to 2^4 tables sl_max_iasize = sl_min_iasize + (stride-1) + 4; // Configured Input Address size iasize = AArch32.S2IASize(walkparams.t0sz); return iasize < sl_min_iasize || iasize > sl_max_iasize;
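As a worked example, assuming the 4KB granule used by this format: granulebits = 12 and stride = 9. With sl0 = '00', startlevel = 2 and levels = 1, so sl_min_iasize = 1*9 + 12 + 1 = 22 and sl_max_iasize = 22 + 8 + 4 = 34. With sl0 = '01', startlevel = 1 and levels = 2, giving a window of 2*9 + 12 + 1 = 31 to 31 + 8 + 4 = 43. An iasize outside the window is reported by AArch32.S2Walk() as a level 1 Translation fault.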
// AArch32.VAIsOutOfRange() // ======================== // Check virtual address bits not resolved by translation are identical // and of accepted value boolean AArch32.VAIsOutOfRange(Regime regime, S1TTWParams walkparams, bits(32) va) if regime == Regime_EL2 then // Input Address size iasize = AArch32.S1IASize(walkparams.t0sz); return walkparams.t0sz != '000' && !IsZero(va<31:iasize>); elsif walkparams.t1sz != '000' && walkparams.t0sz != '000' then // Lower range Input Address size lo_iasize = AArch32.S1IASize(walkparams.t0sz); // Upper range Input Address size up_iasize = AArch32.S1IASize(walkparams.t1sz); return !IsZero(va<31:lo_iasize>) && !IsOnes(va<31:up_iasize>); else return FALSE;
// AArch32.GetS1TLBContext() // ========================= // Gather translation context for accesses with VA to match against TLB entries TLBContext AArch32.GetS1TLBContext(Regime regime, SecurityState ss, bits(32) va) TLBContext tlbcontext; case regime of when Regime_EL2 tlbcontext = AArch32.TLBContextEL2(va); when Regime_EL10 tlbcontext = AArch32.TLBContextEL10(ss, va); when Regime_EL30 tlbcontext = AArch32.TLBContextEL30(va); tlbcontext.includes_s1 = TRUE; // The following may be amended for EL1&0 Regime if caching of stage 2 is successful tlbcontext.includes_s2 = FALSE; return tlbcontext;
// AArch32.GetS2TLBContext() // ========================= // Gather translation context for accesses with IPA to match against TLB entries TLBContext AArch32.GetS2TLBContext(FullAddress ipa) assert ipa.paspace == PAS_NonSecure; TLBContext tlbcontext; tlbcontext.ss = SS_NonSecure; tlbcontext.regime = Regime_EL10; tlbcontext.ipaspace = ipa.paspace; tlbcontext.vmid = ZeroExtend(VTTBR.VMID); tlbcontext.tg = TGx_4KB; tlbcontext.includes_s1 = FALSE; tlbcontext.includes_s2 = TRUE; tlbcontext.ia = ZeroExtend(ipa.address); tlbcontext.cnp = if HaveCommonNotPrivateTransExt() then VTTBR.CnP else '0'; return tlbcontext;
// AArch32.TLBContextEL10() // ======================== // Gather translation context for accesses under EL10 regime // (PL1&0 when EL3 is AArch64) to match against TLB entries TLBContext AArch32.TLBContextEL10(SecurityState ss, bits(32) va) TLBContext tlbcontext; if HaveAArch32EL(EL3) then ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; contextidr = CONTEXTIDR_NS; else ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; contextidr = CONTEXTIDR; tlbcontext.ss = ss; tlbcontext.regime = Regime_EL10; if AArch32.EL2Enabled(ss) then tlbcontext.vmid = ZeroExtend(VTTBR.VMID); if ttbcr.EAE == '1' then tlbcontext.asid = ZeroExtend(if ttbcr.A1 == '0' then ttbr0.ASID else ttbr1.ASID); else tlbcontext.asid = ZeroExtend(contextidr.ASID); tlbcontext.tg = TGx_4KB; tlbcontext.ia = ZeroExtend(va); if HaveCommonNotPrivateTransExt() && ttbcr.EAE == '1' then if AArch32.GetVARange(va, ttbcr.T0SZ, ttbcr.T1SZ) == VARange_LOWER then tlbcontext.cnp = ttbr0.CnP; else tlbcontext.cnp = ttbr1.CnP; else tlbcontext.cnp = '0'; return tlbcontext;
// AArch32.TLBContextEL2() // ======================= // Gather translation context for accesses under EL2 regime to match against TLB entries TLBContext AArch32.TLBContextEL2(bits(32) va) TLBContext tlbcontext; tlbcontext.ss = SS_NonSecure; tlbcontext.regime = Regime_EL2; tlbcontext.ia = ZeroExtend(va); tlbcontext.tg = TGx_4KB; tlbcontext.cnp = if HaveCommonNotPrivateTransExt() then HTTBR.CnP else '0'; return tlbcontext;
// AArch32.TLBContextEL30() // ======================== // Gather translation context for accesses under EL30 regime // (PL1&0 in Secure state when EL3 is AArch32) to match against TLB entries TLBContext AArch32.TLBContextEL30(bits(32) va) TLBContext tlbcontext; tlbcontext.ss = SS_Secure; tlbcontext.regime = Regime_EL30; if TTBCR_S.EAE == '1' then tlbcontext.asid = ZeroExtend(if TTBCR_S.A1 == '0' then TTBR0_S.ASID else TTBR1_S.ASID); else tlbcontext.asid = ZeroExtend(CONTEXTIDR_S.ASID); tlbcontext.tg = TGx_4KB; tlbcontext.ia = ZeroExtend(va); if HaveCommonNotPrivateTransExt() && TTBCR_S.EAE == '1' then if AArch32.GetVARange(va, TTBCR_S.T0SZ, TTBCR_S.T1SZ) == VARange_LOWER then tlbcontext.cnp = TTBR0_S.CnP; else tlbcontext.cnp = TTBR1_S.CnP; else tlbcontext.cnp = '0'; return tlbcontext;
// AArch32.AccessUsesEL() // ====================== // Determine the privilege associated with the access bits(2) AArch32.AccessUsesEL(AccType acctype) if acctype == AccType_UNPRIV then return EL0; else return PSTATE.EL;
// AArch32.EL2Enabled() // ==================== // Returns whether EL2 is enabled for the given Security State boolean AArch32.EL2Enabled(SecurityState ss) if ss == SS_Secure then if !(HaveEL(EL2) && HaveSecureEL2Ext()) then return FALSE; elsif HaveEL(EL3) then return SCR_EL3.EEL2 == '1'; else return boolean IMPLEMENTATION_DEFINED "Secure-only implementation"; else return HaveEL(EL2);
// AArch32.FullTranslate() // ======================= // Perform address translation as specified by VMSA-A32 AddressDescriptor AArch32.FullTranslate(bits(32) va, AccType acctype, boolean iswrite, boolean aligned) // Prepare fault fields in case a fault is detected fault = NoFault(); fault.acctype = acctype; fault.write = iswrite; regime = TranslationRegime(PSTATE.EL, acctype); ispriv = PSTATE.EL != EL0 && acctype != AccType_UNPRIV; ss = SecurityStateForRegime(regime); // First Stage Translation if regime == Regime_EL2 || TTBCR.EAE == '1' then (fault, ipa) = AArch32.S1TranslateLD(fault, regime, ss, va, acctype, aligned, iswrite, ispriv); else (fault, ipa, -) = AArch32.S1TranslateSD(fault, regime, ss, va, acctype, aligned, iswrite, ispriv); if fault.statuscode != Fault_None then return CreateFaultyAddressDescriptor(ZeroExtend(va), fault); if regime == Regime_EL10 && EL2Enabled() then ipa.vaddress = ZeroExtend(va); s2fs1walk = FALSE; (fault, pa) = AArch32.S2Translate(fault, ipa, ss, s2fs1walk, acctype, aligned, iswrite, ispriv); if fault.statuscode != Fault_None then return CreateFaultyAddressDescriptor(ZeroExtend(va), fault); else return pa; else return ipa;
// AArch32.OutputDomain() // ====================== // Determine the domain the translated output address bits(2) AArch32.OutputDomain(Regime regime, bits(4) domain) index = 2 * UInt(domain); if regime == Regime_EL30 then Dn = DACR_S<index+1:index>; elsif HaveAArch32EL(EL3) then Dn = DACR_NS<index+1:index>; else Dn = DACR<index+1:index>; if Dn == '10' then // Reserved value maps to an allocated value (-, Dn) = ConstrainUnpredictableBits(Unpredictable_RESDACR); return Dn;
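Equivalently in C, assuming a uint32_t image of the selected DACR (names hypothetical): domain n selects the 2-bit field at bits <2n+1:2n>.

#include <stdint.h>

static unsigned dacr_domain(uint32_t dacr, unsigned domain) {
    unsigned index = 2 * domain;   /* domain is 0..15             */
    return (dacr >> index) & 0x3;  /* '00' NoAccess, '01' Client,
                                      '10' Reserved, '11' Manager */
}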
// AArch32.S1DisabledOutput() // ========================== // Flat map the VA to IPA/PA, depending on the regime, assigning default memory attributes (FaultRecord, AddressDescriptor) AArch32.S1DisabledOutput(FaultRecord fault, Regime regime, SecurityState ss, bits(32) va, AccType acctype, boolean aligned) // No memory page is guarded when stage 1 address translation is disabled SetInGuardedPage(FALSE); MemoryAttributes memattrs; if regime == Regime_EL10 && AArch32.EL2Enabled(ss) then if ELStateUsingAArch32(EL2, ss == SS_Secure) then default_cacheable = HCR.DC; else default_cacheable = HCR_EL2.DC; else default_cacheable = '0'; if default_cacheable == '1' then // Use default cacheable settings memattrs.memtype = MemType_Normal; memattrs.inner.attrs = MemAttr_WB; memattrs.inner.hints = MemHint_RWA; memattrs.outer.attrs = MemAttr_WB; memattrs.outer.hints = MemHint_RWA; memattrs.shareability = Shareability_NSH; if !ELStateUsingAArch32(EL2, ss == SS_Secure) && HaveMTE2Ext() then memattrs.tagged = HCR_EL2.DCT == '1'; else memattrs.tagged = FALSE; elsif acctype == AccType_IFETCH then memattrs.memtype = MemType_Normal; memattrs.shareability = Shareability_OSH; memattrs.tagged = FALSE; if AArch32.S1ICacheEnabled(regime) then memattrs.inner.attrs = MemAttr_WT; memattrs.inner.hints = MemHint_RA; memattrs.outer.attrs = MemAttr_WT; memattrs.outer.hints = MemHint_RA; else memattrs.inner.attrs = MemAttr_NC; memattrs.outer.attrs = MemAttr_NC; else // Treat memory region as Device memattrs.memtype = MemType_Device; memattrs.device = DeviceType_nGnRnE; memattrs.shareability = Shareability_OSH; memattrs.tagged = FALSE; if HaveTrapLoadStoreMultipleDeviceExt() then case regime of when Regime_EL30 ntlsmd = SCTLR_S.nTLSMD; when Regime_EL2 ntlsmd = HSCTLR.nTLSMD; when Regime_EL10 ntlsmd = if HaveAArch32EL(EL3) then SCTLR_NS.nTLSMD else SCTLR.nTLSMD; else ntlsmd = '1'; if AArch32.S1HasAlignmentFault(acctype, aligned, ntlsmd, memattrs) then fault.statuscode = Fault_Alignment; return (fault, AddressDescriptor UNKNOWN); FullAddress oa; oa.address = ZeroExtend(va); oa.paspace = if ss == SS_Secure then PAS_Secure else PAS_NonSecure; ipa = CreateAddressDescriptor(ZeroExtend(va), oa, memattrs); return (fault, ipa);
// AArch32.S1Enabled() // =================== // Returns whether stage 1 translation is enabled for the active translation regime boolean AArch32.S1Enabled(Regime regime, SecurityState ss) if regime == Regime_EL2 then return HSCTLR.M == '1'; elsif regime == Regime_EL30 then return SCTLR_S.M == '1'; elsif !AArch32.EL2Enabled(ss) then return (if HaveAArch32EL(EL3) then SCTLR_NS.M else SCTLR.M) == '1'; elsif ELStateUsingAArch32(EL2, ss == SS_Secure) then return HCR.<TGE,DC> == '00' && (if HaveAArch32EL(EL3) then SCTLR_NS.M else SCTLR.M) == '1'; else return HCR_EL2.<TGE,DC> == '00' && SCTLR.M == '1';
// AArch32.S1TranslateLD() // ======================= // Perform a stage 1 translation using long-descriptor format mapping VA to IPA/PA // depending on the regime (FaultRecord, AddressDescriptor) AArch32.S1TranslateLD(FaultRecord fault, Regime regime, SecurityState ss, bits(32) va, AccType acctype, boolean aligned, boolean iswrite, boolean ispriv) fault.secondstage = FALSE; fault.s2fs1walk = FALSE; if !AArch32.S1Enabled(regime, ss) then return AArch32.S1DisabledOutput(fault, regime, ss, va, acctype, aligned); walkparams = AArch32.GetS1TTWParams(regime, va); if AArch32.VAIsOutOfRange(regime, walkparams, va) then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, AddressDescriptor UNKNOWN); (fault, walkstate) = AArch32.S1WalkLD(fault, regime, ss, walkparams, va, ispriv); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); SetInGuardedPage(FALSE); // AArch32-VMSA does not guard any pages if AArch32.S1HasAlignmentFault(acctype, aligned, walkparams.ntlsmd, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif IsAtomicRW(acctype) then if AArch32.S1LDHasPermissionsFault(regime, ss, walkparams, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, FALSE) then // The permission fault was not caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = FALSE; elsif AArch32.S1LDHasPermissionsFault(regime, ss, walkparams, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, TRUE) then // The permission fault _was_ caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = TRUE; elsif AArch32.S1LDHasPermissionsFault(regime, ss, walkparams, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, iswrite) then fault.statuscode = Fault_Permission; if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); if ((acctype == AccType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || !AArch32.S1ICacheEnabled(regime))) || (acctype != AccType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && !AArch32.S1DCacheEnabled(regime))) then // Treat memory attributes as Normal Non-Cacheable memattrs = NormalNCMemAttr(); memattrs.xs = walkstate.memattrs.xs; else memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(ss) && (if ELStateUsingAArch32(EL2, ss == SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then memattrs.shareability = walkstate.memattrs.shareability; else memattrs.shareability = EffectiveShareability(memattrs); // Output Address oa = StageOA(ZeroExtend(va), walkparams.tgx, walkstate); ipa = CreateAddressDescriptor(ZeroExtend(va), oa, memattrs); return (fault, ipa);
// AArch32.S1TranslateSD() // ======================= // Perform a stage 1 translation using short-descriptor format mapping VA to IPA/PA // depending on the regime (FaultRecord, AddressDescriptor, SDFType) AArch32.S1TranslateSD(FaultRecord fault, Regime regime, SecurityState ss, bits(32) va, AccType acctype, boolean aligned, boolean iswrite, boolean ispriv) fault.secondstage = FALSE; fault.s2fs1walk = FALSE; if !AArch32.S1Enabled(regime, ss) then (fault, ipa) = AArch32.S1DisabledOutput(fault, regime, ss, va, acctype, aligned); return (fault, ipa, SDFType UNKNOWN); (fault, walkstate) = AArch32.S1WalkSD(fault, regime, ss, va, ispriv); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN, SDFType UNKNOWN); domain = AArch32.OutputDomain(regime, walkstate.domain); SetInGuardedPage(FALSE); // AArch32-VMSA does not guard any pages if HaveTrapLoadStoreMultipleDeviceExt() then case regime of when Regime_EL30 ntlsmd = SCTLR_S.nTLSMD; when Regime_EL10 ntlsmd = if HaveAArch32EL(EL3) then SCTLR_NS.nTLSMD else SCTLR.nTLSMD; else ntlsmd = '1'; if AArch32.S1HasAlignmentFault(acctype, aligned, ntlsmd, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif !(acctype IN {AccType_IC, AccType_DC}) && domain == Domain_NoAccess then fault.statuscode = Fault_Domain; elsif domain == Domain_Client then if IsAtomicRW(acctype) then if AArch32.S1SDHasPermissionsFault(regime, ss, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, FALSE) then // The permission fault was not caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = FALSE; elsif AArch32.S1SDHasPermissionsFault(regime, ss, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, TRUE) then // The permission fault _was_ caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = TRUE; elsif AArch32.S1SDHasPermissionsFault(regime, ss, walkstate.permissions, walkstate.memattrs.memtype, walkstate.baseaddress.paspace, ispriv, acctype, iswrite) then fault.statuscode = Fault_Permission; if fault.statuscode != Fault_None then fault.domain = walkstate.domain; return (fault, AddressDescriptor UNKNOWN, walkstate.sdftype); if ((acctype == AccType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || !AArch32.S1ICacheEnabled(regime))) || (acctype != AccType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && !AArch32.S1DCacheEnabled(regime))) then // Treat memory attributes as Normal Non-Cacheable memattrs = NormalNCMemAttr(); memattrs.xs = walkstate.memattrs.xs; else memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(ss) && (if ELStateUsingAArch32(EL2, ss == SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then memattrs.shareability = walkstate.memattrs.shareability; else memattrs.shareability = EffectiveShareability(memattrs); // Output Address oa = AArch32.SDStageOA(walkstate.baseaddress, va, walkstate.sdftype); ipa = CreateAddressDescriptor(ZeroExtend(va), oa, memattrs); return (fault, ipa, walkstate.sdftype);
// AArch32.S2Translate() // ===================== // Perform a stage 2 translation mapping an IPA to a PA (FaultRecord, AddressDescriptor) AArch32.S2Translate(FaultRecord fault, AddressDescriptor ipa, SecurityState ss, boolean s2fs1walk, AccType acctype, boolean aligned, boolean iswrite, boolean ispriv) assert IsZero(ipa.paddress.address<51:40>); if !ELStateUsingAArch32(EL2, ss == SS_Secure) then s1aarch64 = FALSE; return AArch64.S2Translate(fault, ipa, s1aarch64, ss, s2fs1walk, acctype, aligned, iswrite, ispriv); // Prepare fault fields in case a fault is detected fault.statuscode = Fault_None; fault.secondstage = TRUE; fault.s2fs1walk = s2fs1walk; fault.ipaddress = ipa.paddress; walkparams = AArch32.GetS2TTWParams(); if walkparams.vm == '0' then // Stage 2 is disabled return (fault, ipa); if AArch32.IPAIsOutOfRange(walkparams, ipa.paddress.address<39:0>) then fault.statuscode = Fault_Translation; fault.level = 1; return (fault, AddressDescriptor UNKNOWN); (fault, walkstate) = AArch32.S2Walk(fault, walkparams, ipa); if fault.statuscode != Fault_None then return (fault, AddressDescriptor UNKNOWN); if AArch32.S2HasAlignmentFault(acctype, aligned, walkstate.memattrs) then fault.statuscode = Fault_Alignment; elsif IsAtomicRW(acctype) then assert !s2fs1walk; // AArch32 does not support HW update of TT if AArch32.S2HasPermissionsFault(s2fs1walk, walkparams, walkstate.permissions, walkstate.memattrs.memtype, ispriv, acctype, FALSE) then // The permission fault was not caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = FALSE; elsif AArch32.S2HasPermissionsFault(s2fs1walk, walkparams, walkstate.permissions, walkstate.memattrs.memtype, ispriv, acctype, TRUE) then // The permission fault _was_ caused by lack of write permissions fault.statuscode = Fault_Permission; fault.write = TRUE; elsif AArch32.S2HasPermissionsFault(s2fs1walk, walkparams, walkstate.permissions, walkstate.memattrs.memtype, ispriv, acctype, iswrite) then fault.statuscode = Fault_Permission; if ((s2fs1walk && walkstate.memattrs.memtype == MemType_Device) || (acctype == AccType_IFETCH && (walkstate.memattrs.memtype == MemType_Device || HCR2.ID == '1')) || (acctype != AccType_IFETCH && walkstate.memattrs.memtype == MemType_Normal && HCR2.CD == '1')) then // Treat memory attributes as Normal Non-Cacheable s2_memattrs = NormalNCMemAttr(); s2_memattrs.xs = walkstate.memattrs.xs; else s2_memattrs = walkstate.memattrs; memattrs = S2CombineS1MemAttrs(ipa.memattrs, s2_memattrs); ipa_64 = ZeroExtend(ipa.paddress.address<39:0>, 64); // Output Address oa = StageOA(ipa_64, walkparams.tgx, walkstate); pa = CreateAddressDescriptor(ipa.vaddress, oa, memattrs); return (fault, pa);
// AArch32.SDStageOA() // =================== // Given the final walk state of a short-descriptor translation walk, // map the untranslated input address bits to the base output address FullAddress AArch32.SDStageOA(FullAddress baseaddress, bits(32) va, SDFType sdftype) case sdftype of when SDFType_SmallPage tsize = 12; when SDFType_LargePage tsize = 16; when SDFType_Section tsize = 20; when SDFType_Supersection tsize = 24; // Output Address FullAddress oa; oa.address = baseaddress.address<51:tsize>:va<tsize-1:0>; oa.paspace = baseaddress.paspace; return oa;
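A minimal C sketch of this composition, assuming the base address is carried in a uint64_t (names hypothetical): the low tsize bits come from the VA and the remaining bits from the descriptor base.

#include <stdint.h>

static uint64_t sd_stage_oa(uint64_t base, uint32_t va, unsigned tsize) {
    uint64_t mask = ((uint64_t)1 << tsize) - 1;  /* tsize = 12/16/20/24 */
    return (base & ~mask) | ((uint64_t)va & mask);
}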
// AArch32.TranslateAddress() // ========================== // Main entry point for translating an address AddressDescriptor AArch32.TranslateAddress(bits(32) va, AccType acctype, boolean iswrite, boolean aligned, integer size) regime = TranslationRegime(PSTATE.EL, acctype); if !RegimeUsingAArch32(regime) then return AArch64.TranslateAddress(ZeroExtend(va, 64), acctype, iswrite, aligned, size); result = AArch32.FullTranslate(va, acctype, iswrite, aligned); if !IsFault(result) then result.fault = AArch32.CheckDebug(va, acctype, iswrite, size); // Update virtual address for abort functions result.vaddress = ZeroExtend(va); return result;
// AArch32.DecodeDescriptorTypeLD() // ================================ // Determine whether the long-descriptor is a page, block or table DescriptorType AArch32.DecodeDescriptorTypeLD(bits(64) descriptor, integer level) if descriptor<1:0> == '11' && level == FINAL_LEVEL then return DescriptorType_Page; elsif descriptor<1:0> == '11' then return DescriptorType_Table; elsif descriptor<1:0> == '01' && level != FINAL_LEVEL then return DescriptorType_Block; else return DescriptorType_Invalid;
// AArch32.DecodeDescriptorTypeSD() // ================================ // Determine the type of the short-descriptor SDFType AArch32.DecodeDescriptorTypeSD(bits(32) descriptor, integer level) if level == 1 && descriptor<1:0> == '01' then return SDFType_Table; elsif level == 1 && descriptor<18,1> == '01' then return SDFType_Section; elsif level == 1 && descriptor<18,1> == '11' then return SDFType_Supersection; elsif level == 2 && descriptor<1:0> == '01' then return SDFType_LargePage; elsif level == 2 && descriptor<1:0> == '1x' then return SDFType_SmallPage; else return SDFType_Invalid;
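For illustration, a C sketch of the same decode, assuming a uint32_t descriptor image (enum and function names are hypothetical):

#include <stdint.h>

typedef enum { SD_INVALID, SD_TABLE, SD_SECTION, SD_SUPERSECTION,
               SD_LARGEPAGE, SD_SMALLPAGE } sdf_type;

static sdf_type decode_sd(uint32_t desc, int level) {
    unsigned lo2   = desc & 0x3;          /* descriptor<1:0> */
    unsigned bit18 = (desc >> 18) & 0x1;  /* descriptor<18>  */
    if (level == 1) {
        if (lo2 == 0x1)            return SD_TABLE;
        if ((lo2 & 0x2) && !bit18) return SD_SECTION;      /* <18,1> == '01' */
        if ((lo2 & 0x2) && bit18)  return SD_SUPERSECTION; /* <18,1> == '11' */
    } else if (level == 2) {
        if (lo2 == 0x1)            return SD_LARGEPAGE;
        if (lo2 & 0x2)             return SD_SMALLPAGE;    /* <1:0> == '1x'  */
    }
    return SD_INVALID;
}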
// AArch32.S1IASize() // ================== // Retrieve the number of bits containing the input address for stage 1 translation integer AArch32.S1IASize(bits(3) txsz) return 32 - UInt(txsz);
// AArch32.S1WalkLD() // ================== // Traverse stage 1 translation tables in long format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S1WalkLD(FaultRecord fault, Regime regime, SecurityState ss, S1TTWParams walkparams, bits(32) va, boolean ispriv) if regime == Regime_EL2 then ttbr = HTTBR; txsz = walkparams.t0sz; else varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); if regime == Regime_EL30 then ttbcr = TTBCR_S; ttbr0 = TTBR0_S; ttbr1 = TTBR1_S; elsif HaveAArch32EL(EL3) then ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; else ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; assert ttbcr.EAE == '1'; if varange == VARange_LOWER then txsz = walkparams.t0sz; ttbr = ttbr0; epd = ttbcr.EPD0; else txsz = walkparams.t1sz; ttbr = ttbr1; epd = ttbcr.EPD1; if regime != Regime_EL2 && epd == '1' then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); // Input Address size iasize = AArch32.S1IASize(txsz); granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; startlevel = FINAL_LEVEL - (((iasize-1) - granulebits) DIV stride); levels = FINAL_LEVEL - startlevel; if !IsZero(ttbr<47:40>) then fault.statuscode = Fault_AddressSize; fault.level = 0; return (fault, TTWState UNKNOWN); FullAddress baseaddress; baselsb = iasize - (levels*stride + granulebits) + 3; baseaddress.paspace = if ss == SS_Secure then PAS_Secure else PAS_NonSecure; baseaddress.address = ZeroExtend(ttbr<39:baselsb>:Zeros(baselsb)); TTWState walkstate; walkstate.baseaddress = baseaddress; walkstate.level = startlevel; walkstate.istable = TRUE; // In regimes that support global and non-global translations, translation // table entries from lookup levels other than the final level of lookup // are treated as being non-global walkstate.nG = if HasUnprivileged(regime) then '1' else '0'; walkstate.memattrs = WalkMemAttrs(walkparams.sh, walkparams.irgn, walkparams.orgn); walkstate.permissions.ap_table = '00'; walkstate.permissions.xn_table = '0'; walkstate.permissions.pxn_table = '0'; indexmsb = iasize - 1; bits(64) descriptor; AddressDescriptor walkaddress; repeat fault.level = walkstate.level; indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; bits(40) index = ZeroExtend(va<indexmsb:indexlsb>:'000'); // VA is needed in the case of reporting an external abort walkaddress.vaddress = ZeroExtend(va); walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; if !AArch32.S1DCacheEnabled(regime) then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(ss) && (if ELStateUsingAArch32(EL2, ss == SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then walkaddress.memattrs.shareability = walkstate.memattrs.shareability; else walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); // If there are two stages of translation, then the first stage table walk addresses // are themselves subject to translation if regime == Regime_EL10 && AArch32.EL2Enabled(ss) then s2fs1walk = TRUE; s2acctype = AccType_TTW; s2aligned = TRUE; s2write = FALSE; (s2fault, s2walkaddress) = AArch32.S2Translate(fault, walkaddress, ss, s2fs1walk, s2acctype, s2aligned, s2write, ispriv); // Check for a fault on the stage 2 walk if s2fault.statuscode != Fault_None then return (s2fault, TTWState UNKNOWN); (fault, descriptor) = FetchDescriptor(walkparams.ee, s2walkaddress, fault); else (fault, descriptor) = FetchDescriptor(walkparams.ee, walkaddress, fault); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); desctype = AArch32.DecodeDescriptorTypeLD(descriptor, walkstate.level); case desctype of when DescriptorType_Table if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); walkstate.baseaddress.address = ZeroExtend(descriptor<39:12>:Zeros(12)); if walkstate.baseaddress.paspace == PAS_Secure && descriptor<63> == '1' then walkstate.baseaddress.paspace = PAS_NonSecure; if walkparams.hpd == '0' then walkstate.permissions.xn_table = (walkstate.permissions.xn_table OR descriptor<60>); walkstate.permissions.ap_table = (walkstate.permissions.ap_table OR descriptor<62:61>); walkstate.permissions.pxn_table = (walkstate.permissions.pxn_table OR descriptor<59>); walkstate.level = walkstate.level + 1; indexmsb = indexlsb - 1; when DescriptorType_Invalid fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when DescriptorType_Page, DescriptorType_Block walkstate.istable = FALSE; until desctype IN {DescriptorType_Page, DescriptorType_Block}; // Check the output address is inside the supported range if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); // Check the access flag if descriptor<10> == '0' then fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); walkstate.permissions.xn = descriptor<54>; walkstate.permissions.pxn = descriptor<53>; walkstate.permissions.ap = descriptor<7:6>:'1'; walkstate.contiguous = descriptor<52>; if regime == Regime_EL2 then // All EL2 regime accesses are treated as Global walkstate.nG = '0'; elsif ss == SS_Secure && walkstate.baseaddress.paspace == PAS_NonSecure then // When a PE is using the Long-descriptor translation table format, // and is in Secure state, a translation must be treated as non-global, // regardless of the value of the nG bit, // if NSTable is set to 1 at any level of the translation table walk. walkstate.nG = '1'; else walkstate.nG = descriptor<11>; walkstate.baseaddress.address = ZeroExtend(descriptor<39:indexlsb>:Zeros(indexlsb)); if walkstate.baseaddress.paspace == PAS_Secure && descriptor<5> == '1' then walkstate.baseaddress.paspace = PAS_NonSecure; memattr = descriptor<4:2>; sh = descriptor<9:8>; attr = MAIRAttr(UInt(memattr), walkparams.mair); s1aarch64 = FALSE; walkstate.memattrs = S1DecodeMemAttrs(attr, sh, s1aarch64); return (fault, walkstate);
// AArch32.S1WalkSD() // ================== // Traverse stage 1 translation tables in short format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S1WalkSD(FaultRecord fault, Regime regime, SecurityState ss, bits(32) va, boolean ispriv) // Determine correct translation control registers to use. if regime == Regime_EL30 then sctlr = SCTLR_S; ttbcr = TTBCR_S; ttbr0 = TTBR0_S; ttbr1 = TTBR1_S; elsif HaveAArch32EL(EL3) then sctlr = SCTLR_NS; ttbcr = TTBCR_NS; ttbr0 = TTBR0_NS; ttbr1 = TTBR1_NS; else sctlr = SCTLR; ttbcr = TTBCR; ttbr0 = TTBR0; ttbr1 = TTBR1; assert ttbcr.EAE == '0'; ee = sctlr.EE; afe = sctlr.AFE; tre = sctlr.TRE; n = UInt(ttbcr.N); if n == 0 || IsZero(va<31:(32-n)>) then ttb = ttbr0.TTB0:Zeros(7); pd = ttbcr.PD0; irgn = ttbr0.IRGN; rgn = ttbr0.RGN; s = ttbr0.S; nos = ttbr0.NOS; else n = 0; // TTBR1 translation always treats N as 0 ttb = ttbr1.TTB1:Zeros(7); pd = ttbcr.PD1; irgn = ttbr1.IRGN; rgn = ttbr1.RGN; s = ttbr1.S; nos = ttbr1.NOS; // Check if Translation table walk disabled for translations with this Base register. if pd == '1' then fault.level = 1; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); FullAddress baseaddress; baseaddress.paspace = if ss == SS_Secure then PAS_Secure else PAS_NonSecure; baseaddress.address = ZeroExtend(ttb<31:14-n>:Zeros(14-n)); TTWState walkstate; walkstate.baseaddress = baseaddress; // In regimes that support global and non-global translations, translation // table entries from lookup levels other than the final level of lookup // are treated as being non-global. Translations in Short-Descriptor Format // always support global & non-global translations. walkstate.nG = '1'; walkstate.memattrs = WalkMemAttrs(s:nos, irgn, rgn); walkstate.level = 1; walkstate.istable = TRUE; bits(4) domain; bits(32) descriptor; AddressDescriptor walkaddress; repeat fault.level = walkstate.level; bits(32) index; if walkstate.level == 1 then index = ZeroExtend(va<31-n:20>:'00'); else index = ZeroExtend(va<19:12>:'00'); walkaddress.vaddress = ZeroExtend(va); walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; if !AArch32.S1DCacheEnabled(regime) then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; // Shareability value of stage 1 translation subject to stage 2 is IMPLEMENTATION DEFINED // to be either effective value or descriptor value if (regime == Regime_EL10 && AArch32.EL2Enabled(ss) && (if ELStateUsingAArch32(EL2, ss == SS_Secure) then HCR.VM else HCR_EL2.VM) == '1' && !(boolean IMPLEMENTATION_DEFINED "Apply effective shareability at stage 1")) then walkaddress.memattrs.shareability = walkstate.memattrs.shareability; else walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); if regime == Regime_EL10 && AArch32.EL2Enabled(ss) then s2fs1walk = TRUE; s2acctype = AccType_TTW; s2aligned = TRUE; s2write = FALSE; (s2fault, s2walkaddress) = AArch32.S2Translate(fault, walkaddress, ss, s2fs1walk, s2acctype, s2aligned, s2write, ispriv); if s2fault.statuscode != Fault_None then return (s2fault, TTWState UNKNOWN); (fault, descriptor) = FetchDescriptor(ee, s2walkaddress, fault); else (fault, descriptor) = FetchDescriptor(ee, walkaddress, fault); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); walkstate.sdftype = AArch32.DecodeDescriptorTypeSD(descriptor, walkstate.level); case walkstate.sdftype of when SDFType_Invalid fault.domain = domain; fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when SDFType_Table domain = descriptor<8:5>; ns = descriptor<3>; pxn = descriptor<2>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:10>:Zeros(10)); walkstate.level = 2; when SDFType_SmallPage nG = descriptor<11>; s = descriptor<10>; ap = descriptor<9,5:4>; tex = descriptor<8:6>; c = descriptor<3>; b = descriptor<2>; xn = descriptor<0>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:12>:Zeros(12)); walkstate.istable = FALSE; when SDFType_LargePage xn = descriptor<15>; tex = descriptor<14:12>; nG = descriptor<11>; s = descriptor<10>; ap = descriptor<9,5:4>; c = descriptor<3>; b = descriptor<2>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:16>:Zeros(16)); walkstate.istable = FALSE; when SDFType_Section ns = descriptor<19>; nG = descriptor<17>; s = descriptor<16>; ap = descriptor<15,11:10>; tex = descriptor<14:12>; domain = descriptor<8:5>; xn = descriptor<4>; c = descriptor<3>; b = descriptor<2>; pxn = descriptor<0>; walkstate.baseaddress.address = ZeroExtend(descriptor<31:20>:Zeros(20)); walkstate.istable = FALSE; when SDFType_Supersection ns = descriptor<19>; nG = descriptor<17>; s = descriptor<16>; ap = descriptor<15,11:10>; tex = descriptor<14:12>; xn = descriptor<4>; c = descriptor<3>; b = descriptor<2>; pxn = descriptor<0>; domain = '0000'; walkstate.baseaddress.address = ZeroExtend(descriptor<8:5,23:20,31:24>:Zeros(24)); walkstate.istable = FALSE; until walkstate.sdftype != SDFType_Table; if afe == '1' && ap<0> == '0' then fault.domain = domain; fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); // Decode the TEX, C, B and S bits to produce target memory attributes if tre == '1' then walkstate.memattrs = AArch32.RemappedTEXDecode(regime, tex, c, b, s); elsif RemapRegsHaveResetValues() then walkstate.memattrs = AArch32.DefaultTEXDecode(tex, c, b, s); else walkstate.memattrs = MemoryAttributes IMPLEMENTATION_DEFINED; walkstate.permissions.ap = ap; walkstate.permissions.xn = xn; walkstate.permissions.pxn = pxn; walkstate.domain = domain; walkstate.nG = nG; if ss == SS_Secure && ns == '0' then walkstate.baseaddress.paspace = PAS_Secure; else walkstate.baseaddress.paspace = PAS_NonSecure; return (fault, walkstate);
// AArch32.S2IASize() // ================== // Retrieve the number of bits containing the input address for stage 2 translation integer AArch32.S2IASize(bits(4) t0sz) return 32 - SInt(t0sz);
// AArch32.S2StartLevel() // ====================== // Determine the initial lookup level when performing a stage 2 translation // table walk integer AArch32.S2StartLevel(bits(2) sl0) return 2 - UInt(sl0);
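Note that the stage 2 T0SZ field is signed, so the input-address size ranges from 25 (t0sz = 7) to 40 (t0sz = -8) bits. A small C sketch of both helpers with the 4-bit sign extension made explicit (names hypothetical):

#include <stdint.h>

static int s2_iasize(unsigned t0sz4) {
    int s = (t0sz4 & 0x8) ? (int)t0sz4 - 16 : (int)t0sz4;  /* SInt(t0sz) */
    return 32 - s;
}

static int s2_start_level(unsigned sl0) {
    return 2 - (int)sl0;   /* sl0 = '00' -> level 2, '01' -> level 1 */
}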
// AArch32.S2Walk() // ================ // Traverse stage 2 translation tables in long format to obtain the final descriptor (FaultRecord, TTWState) AArch32.S2Walk(FaultRecord fault, S2TTWParams walkparams, AddressDescriptor ipa) if walkparams.sl0 == '1x' || AArch32.S2InconsistentSL(walkparams) then fault.statuscode = Fault_Translation; fault.level = 1; return (fault, TTWState UNKNOWN); // Input Address size iasize = AArch32.S2IASize(walkparams.t0sz); startlevel = AArch32.S2StartLevel(walkparams.sl0); levels = FINAL_LEVEL - startlevel; granulebits = TGxGranuleBits(walkparams.tgx); stride = granulebits - 3; if !IsZero(VTTBR<47:40>) then fault.statuscode = Fault_AddressSize; fault.level = 0; return (fault, TTWState UNKNOWN); FullAddress baseaddress; baselsb = iasize - (levels*stride + granulebits) + 3; baseaddress.paspace = PAS_NonSecure; baseaddress.address = ZeroExtend(VTTBR<39:baselsb>:Zeros(baselsb)); TTWState walkstate; walkstate.baseaddress = baseaddress; walkstate.level = startlevel; walkstate.istable = TRUE; walkstate.memattrs = WalkMemAttrs(walkparams.sh, walkparams.irgn, walkparams.orgn); indexmsb = iasize - 1; bits(64) descriptor; AddressDescriptor walkaddress; repeat fault.level = walkstate.level; indexlsb = (FINAL_LEVEL - walkstate.level)*stride + granulebits; bits(40) index = ZeroExtend(ipa.paddress.address<indexmsb:indexlsb>:'000'); // Update virtual address for abort functions walkaddress.vaddress = ipa.vaddress; walkaddress.paddress.address = walkstate.baseaddress.address OR ZeroExtend(index); walkaddress.paddress.paspace = walkstate.baseaddress.paspace; if HCR2.CD == '1' then walkaddress.memattrs = NormalNCMemAttr(); walkaddress.memattrs.xs = walkstate.memattrs.xs; else walkaddress.memattrs = walkstate.memattrs; walkaddress.memattrs.shareability = EffectiveShareability(walkaddress.memattrs); (fault, descriptor) = FetchDescriptor(walkparams.ee, walkaddress, fault); if fault.statuscode != Fault_None then return (fault, TTWState UNKNOWN); desctype = AArch32.DecodeDescriptorTypeLD(descriptor, walkstate.level); case desctype of when DescriptorType_Table if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); walkstate.baseaddress.address = ZeroExtend(descriptor<39:12>:Zeros(12)); walkstate.level = walkstate.level + 1; indexmsb = indexlsb - 1; when DescriptorType_Invalid fault.statuscode = Fault_Translation; return (fault, TTWState UNKNOWN); when DescriptorType_Page, DescriptorType_Block walkstate.istable = FALSE; until desctype IN {DescriptorType_Page, DescriptorType_Block}; // Check the output address is inside the supported range if !IsZero(descriptor<47:40>) then fault.statuscode = Fault_AddressSize; return (fault, TTWState UNKNOWN); // Check the access flag if descriptor<10> == '0' then fault.statuscode = Fault_AccessFlag; return (fault, TTWState UNKNOWN); // Unpack the descriptor into address and upper and lower block attributes walkstate.baseaddress.address = ZeroExtend(descriptor<39:indexlsb>:Zeros(indexlsb)); walkstate.permissions.s2ap = descriptor<7:6>; walkstate.permissions.s2xn = descriptor<54>; if HaveExtendedExecuteNeverExt() then walkstate.permissions.s2xnx = descriptor<53>; else walkstate.permissions.s2xnx = '0'; memattr = descriptor<5:2>; sh = descriptor<9:8>; walkstate.memattrs = S2DecodeMemAttrs(memattr, sh); walkstate.contiguous = descriptor<52>; return (fault, walkstate);
// AArch32.TranslationSizeSD() // =========================== // Determine the size of the translation integer AArch32.TranslationSizeSD(SDFType sdftype) case sdftype of when SDFType_SmallPage tsize = 12; when SDFType_LargePage tsize = 16; when SDFType_Section tsize = 20; when SDFType_Supersection tsize = 24; return tsize;
// AArch32.GetS1TTWParams() // ======================== // Returns stage 1 translation table walk parameters from respective controlling // system registers. S1TTWParams AArch32.GetS1TTWParams(Regime regime, bits(32) va) S1TTWParams walkparams; case regime of when Regime_EL2 walkparams = AArch32.S1TTWParamsEL2(); when Regime_EL10 walkparams = AArch32.S1TTWParamsEL10(va); when Regime_EL30 walkparams = AArch32.S1TTWParamsEL30(va); return walkparams;
// AArch32.GetS2TTWParams() // ======================== // Gather walk parameters for stage 2 translation S2TTWParams AArch32.GetS2TTWParams() S2TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.s = VTCR.S; walkparams.t0sz = VTCR.T0SZ; walkparams.sl0 = VTCR.SL0; walkparams.irgn = VTCR.IRGN0; walkparams.orgn = VTCR.ORGN0; walkparams.sh = VTCR.SH0; walkparams.ee = HSCTLR.EE; walkparams.ptw = HCR.PTW; walkparams.vm = HCR.VM OR HCR.DC; // VTCR.S must match VTCR.T0SZ[3] if walkparams.s != walkparams.t0sz<3> then (-, walkparams.t0sz) = ConstrainUnpredictableBits(Unpredictable_RESVTCRS); return walkparams;
// AArch32.GetVARange() // ==================== // Determine whether a 32-bit VA lies in the lower or upper input address range of a stage 1 // long-descriptor translation regime VARange AArch32.GetVARange(bits(32) va, bits(3) t0sz, bits(3) t1sz) // Lower range Input Address size lo_iasize = AArch32.S1IASize(t0sz); // Upper range Input Address size up_iasize = AArch32.S1IASize(t1sz); if t1sz == '000' && t0sz == '000' then return VARange_LOWER; elsif t1sz == '000' then return if IsZero(va<31:lo_iasize>) then VARange_LOWER else VARange_UPPER; elsif t0sz == '000' then return if IsOnes(va<31:up_iasize>) then VARange_UPPER else VARange_LOWER; elsif IsZero(va<31:lo_iasize>) then return VARange_LOWER; elsif IsOnes(va<31:up_iasize>) then return VARange_UPPER; else // Will be reported as a Translation fault return VARange UNKNOWN;
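As an illustration of the range-selection rules above, the following C sketch (not architecture pseudocode; it assumes AArch32.S1IASize(tsz) evaluates to 32 - tsz for the long-descriptor format, and the function and enum names are hypothetical) returns which range a VA selects:

#include <stdint.h>
#include <stdio.h>

// Illustrative only: a C sketch of AArch32.GetVARange, assuming
// AArch32.S1IASize(tsz) == 32 - tsz for the long-descriptor format.
enum va_range { VA_LOWER, VA_UPPER, VA_NEITHER /* walk reports a Translation fault */ };

static enum va_range get_va_range(uint32_t va, unsigned t0sz, unsigned t1sz) {
    unsigned lo_iasize = 32 - t0sz;   // lower-range input-address size
    unsigned up_iasize = 32 - t1sz;   // upper-range input-address size

    if (t0sz == 0 && t1sz == 0)
        return VA_LOWER;
    if (t1sz == 0)
        return (va >> lo_iasize) == 0 ? VA_LOWER : VA_UPPER;
    if (t0sz == 0)
        return (va >> up_iasize) == (1u << t1sz) - 1 ? VA_UPPER : VA_LOWER;
    if ((va >> lo_iasize) == 0)
        return VA_LOWER;                          // top bits all zero
    if ((va >> up_iasize) == (1u << t1sz) - 1)
        return VA_UPPER;                          // top bits all ones
    return VA_NEITHER;
}

int main(void) {
    printf("%d\n", (int)get_va_range(0xFFF00000u, 2, 2)); // 1: VA_UPPER
    printf("%d\n", (int)get_va_range(0x00001000u, 2, 2)); // 0: VA_LOWER
    printf("%d\n", (int)get_va_range(0x80000000u, 2, 2)); // 2: VA_NEITHER
    return 0;
}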
// AArch32.S1DCacheEnabled() // ========================= // Determine cacheability of stage 1 data accesses boolean AArch32.S1DCacheEnabled(Regime regime) case regime of when Regime_EL30 return SCTLR_S.C == '1'; when Regime_EL2 return HSCTLR.C == '1'; when Regime_EL10 return (if HaveAArch32EL(EL3) then SCTLR_NS.C else SCTLR.C) == '1';
// AArch32.S1ICacheEnabled() // ========================= // Determine cacheability of stage 1 instruction fetches boolean AArch32.S1ICacheEnabled(Regime regime) case regime of when Regime_EL30 return SCTLR_S.I == '1'; when Regime_EL2 return HSCTLR.I == '1'; when Regime_EL10 return (if HaveAArch32EL(EL3) then SCTLR_NS.I else SCTLR.I) == '1';
// AArch32.S1TTWParamsEL10() // ========================= // Gather stage 1 translation table walk parameters for EL1&0 regime // (with EL2 enabled or disabled). S1TTWParams AArch32.S1TTWParamsEL10(bits(32) va) if HaveAArch32EL(EL3) then ttbcr = TTBCR_NS; ttbcr2 = TTBCR2_NS; sctlr = SCTLR_NS; mair = MAIR1_NS:MAIR0_NS; sif = SCR.SIF; else ttbcr = TTBCR; ttbcr2 = TTBCR2; sctlr = SCTLR; mair = MAIR1:MAIR0; sif = SCR_EL3.SIF; assert ttbcr.EAE == '1'; S1TTWParams walkparams; walkparams.t0sz = ttbcr.T0SZ; walkparams.t1sz = ttbcr.T1SZ; walkparams.ee = sctlr.EE; walkparams.wxn = sctlr.WXN; walkparams.uwxn = sctlr.UWXN; walkparams.ntlsmd = if HaveTrapLoadStoreMultipleDeviceExt() then sctlr.nTLSMD else '1'; walkparams.mair = mair; walkparams.sif = sif; varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); if varange == VARange_LOWER then walkparams.sh = ttbcr.SH0; walkparams.irgn = ttbcr.IRGN0; walkparams.orgn = ttbcr.ORGN0; walkparams.hpd = if AArch32.HaveHPDExt() then ttbcr.T2E AND ttbcr2.HPD0 else '0'; else walkparams.sh = ttbcr.SH1; walkparams.irgn = ttbcr.IRGN1; walkparams.orgn = ttbcr.ORGN1; walkparams.hpd = if AArch32.HaveHPDExt() then ttbcr.T2E AND ttbcr2.HPD1 else '0'; return walkparams;
// AArch32.S1TTWParamsEL2() // ======================== // Gather stage 1 translation table walk parameters for EL2 regime S1TTWParams AArch32.S1TTWParamsEL2() S1TTWParams walkparams; walkparams.tgx = TGx_4KB; walkparams.t0sz = HTCR.T0SZ; walkparams.irgn = HTCR.IRGN0; walkparams.orgn = HTCR.ORGN0; walkparams.sh = HTCR.SH0; walkparams.hpd = if AArch32.HaveHPDExt() then HTCR.HPD else '0'; walkparams.ee = HSCTLR.EE; walkparams.wxn = HSCTLR.WXN; if HaveTrapLoadStoreMultipleDeviceExt() then walkparams.ntlsmd = HSCTLR.nTLSMD; else walkparams.ntlsmd = '1'; walkparams.mair = HMAIR1:HMAIR0; return walkparams;
// AArch32.S1TTWParamsEL30() // ========================= // Gather stage 1 translation table walk parameters for EL3&0 regime S1TTWParams AArch32.S1TTWParamsEL30(bits(32) va) assert TTBCR_S.EAE == '1'; S1TTWParams walkparams; walkparams.t0sz = TTBCR_S.T0SZ; walkparams.t1sz = TTBCR_S.T1SZ; walkparams.ee = SCTLR_S.EE; walkparams.wxn = SCTLR_S.WXN; walkparams.uwxn = SCTLR_S.UWXN; walkparams.ntlsmd = if HaveTrapLoadStoreMultipleDeviceExt() then SCTLR_S.nTLSMD else '1'; walkparams.mair = MAIR1_S:MAIR0_S; walkparams.sif = SCR.SIF; varange = AArch32.GetVARange(va, walkparams.t0sz, walkparams.t1sz); if varange == VARange_LOWER then walkparams.sh = TTBCR_S.SH0; walkparams.irgn = TTBCR_S.IRGN0; walkparams.orgn = TTBCR_S.ORGN0; walkparams.hpd = if AArch32.HaveHPDExt() then TTBCR_S.T2E AND TTBCR2_S.HPD0 else '0'; else walkparams.sh = TTBCR_S.SH1; walkparams.irgn = TTBCR_S.IRGN1; walkparams.orgn = TTBCR_S.ORGN1; walkparams.hpd = if AArch32.HaveHPDExt() then TTBCR_S.T2E AND TTBCR2_S.HPD1 else '0'; return walkparams;
// BRBCycleCountingEnabled() // ========================= // Returns TRUE if the BRBINF<n>_EL1.{CCU, CC} fields are valid, FALSE otherwise. boolean BRBCycleCountingEnabled() if EL2Enabled() && BRBCR_EL2.CC == '0' then return FALSE; if BRBCR_EL1.CC == '0' then return FALSE; return TRUE;
// BRBEBranch() // ============ // Called to write branch record for the following branches when BRB is active: // direct branches, // indirect branches, // direct branches with link, // indirect branches with link, // returns from subroutines. BRBEBranch(BranchType br_type, boolean cond, bits(64) target_address) if BranchRecordAllowed(PSTATE.EL) && FilterBranchRecord(br_type, cond) then bits(6) branch_type; case br_type of when BranchType_DIR branch_type = if cond then '001000' else '000000'; when BranchType_INDIR branch_type = '000001'; when BranchType_DIRCALL branch_type = '000010'; when BranchType_INDCALL branch_type = '000011'; when BranchType_RET branch_type = '000101'; otherwise Unreachable(); bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if HaveTME() then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = if HaveTME() && TSTATE.depth > 0 then '1' else '0'; bits(2) el = PSTATE.EL; bit mispredict = if BRBEMispredictAllowed() && BranchMispredict() then '1' else '0'; UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, '11', PC[], target_address); BRBFCR_EL1.LASTFAILED = '0'; return;
// BRBEBranchOnISB() // ================= // Returns TRUE if ISBs generate Branch records, and FALSE otherwise. boolean BRBEBranchOnISB() return boolean IMPLEMENTATION_DEFINED "ISB generates Branch records";
// BRBEDebugStateExit() // ==================== // Called to write Debug state exit branch record when BRB is active. BRBEDebugStateExit(bits(64) target_address) if BranchRecordAllowed(PSTATE.EL) then // Debug state is a prohibited region, therefore ccu=1, cc=0, source_address=0 bits(6) branch_type = '111001'; bit ccu = '1'; bits(14) cc = Zeros(14); bit lastfailed = if HaveTME() then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = '0'; bits(2) el = PSTATE.EL; bit mispredict = '0'; UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, '01', Zeros(64), target_address); BRBFCR_EL1.LASTFAILED = '0'; return;
// BRBEException() // =============== // Called to write exception branch record when BRB is active. BRBEException(Exception exception, bits(64) preferred_exception_return, bits(64) target_address, bits(2) target_el) case target_el of when EL3 if !HaveBRBEv1p1() || (MDCR_EL3.E3BREC == MDCR_EL3.E3BREW) then return; when EL2 if BRBCR_EL2.EXCEPTION == '0' then return; when EL1 if BRBCR_EL1.EXCEPTION == '0' then return; boolean source_valid = BranchRecordAllowed(PSTATE.EL); boolean target_valid = BranchRecordAllowed(target_el); if source_valid || target_valid then bits(6) branch_type; case exception of when Exception_Uncategorized branch_type = '100011'; // Trap when Exception_WFxTrap branch_type = '100011'; // Trap when Exception_CP15RTTrap branch_type = '100011'; // Trap when Exception_CP15RRTTrap branch_type = '100011'; // Trap when Exception_CP14RTTrap branch_type = '100011'; // Trap when Exception_CP14DTTrap branch_type = '100011'; // Trap when Exception_AdvSIMDFPAccessTrap branch_type = '100011'; // Trap when Exception_FPIDTrap branch_type = '100011'; // Trap when Exception_PACTrap branch_type = '100011'; // Trap when Exception_TSTARTAccessTrap branch_type = '100011'; // Trap when Exception_CP14RRTTrap branch_type = '100011'; // Trap when Exception_BranchTarget branch_type = '101011'; // Inst Fault when Exception_IllegalState branch_type = '100011'; // Trap when Exception_SupervisorCall branch_type = '100010'; // Call when Exception_HypervisorCall branch_type = '100010'; // Call when Exception_MonitorCall branch_type = '100010'; // Call when Exception_SystemRegisterTrap branch_type = '100011'; // Trap when Exception_SVEAccessTrap branch_type = '100011'; // Trap when Exception_SMEAccessTrap branch_type = '100011'; // Trap when Exception_ERetTrap branch_type = '100011'; // Trap when Exception_PACFail branch_type = '101100'; // Data Fault when Exception_InstructionAbort branch_type = '101011'; // Inst Fault when Exception_PCAlignment branch_type = '101010'; // Alignment when Exception_DataAbort branch_type = '101100'; // Data Fault when Exception_NV2DataAbort branch_type = '101100'; // Data Fault when Exception_SPAlignment branch_type = '101010'; // Alignment when Exception_FPTrappedException branch_type = '100011'; // Trap when Exception_SError branch_type = '100100'; // System Error when Exception_Breakpoint branch_type = '100110'; // Inst debug when Exception_SoftwareStep branch_type = '100110'; // Inst debug when Exception_Watchpoint branch_type = '100111'; // Data debug when Exception_NV2Watchpoint branch_type = '100111'; // Data debug when Exception_SoftwareBreakpoint branch_type = '100110'; // Inst debug when Exception_IRQ branch_type = '101110'; // IRQ when Exception_FIQ branch_type = '101111'; // FIQ otherwise Unreachable(); bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if HaveTME() then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = if source_valid && HaveTME() && TSTATE.depth > 0 then '1' else '0'; bits(2) el = if target_valid then target_el else '00'; bit mispredict = '0'; bit sv = if source_valid then '1' else '0'; bit tv = if target_valid then '1' else '0'; bits(64) source_address = if source_valid then preferred_exception_return else Zeros(64); if !target_valid then target_address = Zeros(64); UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, sv:tv, source_address, target_address); BRBFCR_EL1.LASTFAILED = '0'; return;
// BRBEExceptionReturn() // ===================== // Called to write exception return branch record when BRB is active. BRBEExceptionReturn(bits(64) target_address, bits(2) source_el) case source_el of when EL3 if !HaveBRBEv1p1() || (MDCR_EL3.E3BREC == MDCR_EL3.E3BREW) then return; when EL2 if BRBCR_EL2.ERTN == '0' then return; when EL1 if BRBCR_EL1.ERTN == '0' then return; boolean source_valid = BranchRecordAllowed(source_el); boolean target_valid = BranchRecordAllowed(PSTATE.EL); if source_valid || target_valid then bits(6) branch_type = '000111'; bit ccu; bits(14) cc; (ccu, cc) = BranchEncCycleCount(); bit lastfailed = if HaveTME() then BRBFCR_EL1.LASTFAILED else '0'; bit transactional = if source_valid && HaveTME() && TSTATE.depth > 0 then '1' else '0'; bits(2) el = if target_valid then PSTATE.EL else '00'; bit mispredict = if source_valid && BRBEMispredictAllowed() && BranchMispredict() then '1' else '0'; bit sv = if source_valid then '1' else '0'; bit tv = if target_valid then '1' else '0'; bits(64) source_address = if source_valid then PC[] else Zeros(64); if !target_valid then target_address = Zeros(64); UpdateBranchRecordBuffer(ccu, cc, lastfailed, transactional, branch_type, el, mispredict, sv:tv, source_address, target_address); BRBFCR_EL1.LASTFAILED = '0'; return;
// BRBEMispredictAllowed() // ======================= // Returns TRUE if the recording of branch misprediction is allowed, FALSE otherwise. boolean BRBEMispredictAllowed() if EL2Enabled() && BRBCR_EL2.MPRED == '0' then return FALSE; if BRBCR_EL1.MPRED == '0' then return FALSE; return TRUE;
// BRBETimeStamp() // =============== // Returns captured timestamp. TimeStamp BRBETimeStamp() if HaveEL(EL2) then TS_el2 = BRBCR_EL2.TS; if !HaveECVExt() && TS_el2 == '10' then // Reserved value (-, TS_el2) = ConstrainUnpredictableBits(Unpredictable_EL2TIMESTAMP); case TS_el2 of when '00' // Falls out to check BRBCR_EL1.TS when '01' return TimeStamp_Virtual; when '10' assert HaveECVExt(); // Otherwise ConstrainUnpredictableBits removes this case return TimeStamp_OffsetPhysical; when '11' return TimeStamp_Physical; TS_el1 = BRBCR_EL1.TS; if TS_el1 == '00' || (!HaveECVExt() && TS_el1 == '10') then // Reserved value (-, TS_el1) = ConstrainUnpredictableBits(Unpredictable_EL1TIMESTAMP); case TS_el1 of when '01' return TimeStamp_Virtual; when '10' return TimeStamp_OffsetPhysical; when '11' return TimeStamp_Physical; otherwise Unreachable(); // ConstrainUnpredictableBits removes this case
// BRB_IALL() // ========== // Called to perform invalidation of branch records BRB_IALL() for i = 0 to GetBRBENumRecords() - 1 Records_SRC[i] = Zeros(64); Records_TGT[i] = Zeros(64); Records_INF[i] = Zeros(64);
// BRB_INJ() // ========= // Called to perform manual injection of branch records. BRB_INJ() UpdateBranchRecordBuffer(BRBINFINJ_EL1.CCU, BRBINFINJ_EL1.CC, BRBINFINJ_EL1.LASTFAILED, BRBINFINJ_EL1.T, BRBINFINJ_EL1.TYPE, BRBINFINJ_EL1.EL, BRBINFINJ_EL1.MPRED, BRBINFINJ_EL1.VALID, BRBSRCINJ_EL1.ADDRESS, BRBTGTINJ_EL1.ADDRESS); BRBINFINJ_EL1 = bits(64) UNKNOWN; BRBSRCINJ_EL1 = bits(64) UNKNOWN; BRBTGTINJ_EL1 = bits(64) UNKNOWN;
// The first return result is '1' if either of the following is true, and '0' otherwise: // - This is the first Branch record after the PE exited a Prohibited Region. // - This is the first Branch record after cycle counting has been enabled. // If the first return return is '0', the second return result is the encoded cycle count // since the last branch. // The format of this field uses a mantissa and exponent to express the cycle count value. // - bits[7:0] indicate the mantissa M. // - bits[13:8] indicate the exponent E. // The cycle count is expressed using the following function: // cycle_count = (if IsZero(E) then UInt(M) else UInt('1':M:Zeros(UInt(E)-1))) // A value of all ones in both the mantissa and exponent indicates the cycle count value // exceeded the size of the cycle counter. // If the cycle count is not known, the second return result is zero. (bit, bits(14)) BranchEncCycleCount();
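For illustration, here is a minimal C decoder for this mantissa/exponent format. It is not part of the architecture pseudocode; only the cycle_count formula above is from the definition, and the function name is hypothetical:

#include <stdint.h>
#include <stdio.h>

// Illustrative only: decodes the 14-bit cycle-count field described above
// (M = bits[7:0], E = bits[13:8]).
static uint64_t decode_cycle_count(uint16_t cc14) {
    uint64_t m = cc14 & 0xFF;          // 8-bit mantissa
    unsigned e = (cc14 >> 8) & 0x3F;   // 6-bit exponent
    if (e == 0)
        return m;                      // small counts are exact
    return (uint64_t)(0x100u | m) << (e - 1);  // '1':M shifted left by E-1
}

int main(void) {
    printf("%llu\n", (unsigned long long)decode_cycle_count(0x0025)); // E=0, M=37 -> 37
    printf("%llu\n", (unsigned long long)decode_cycle_count(0x0180)); // E=1 -> ('1':0x80) = 384
    return 0;
}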
// Returns TRUE if the branch being executed was mispredicted, FALSE otherwise. boolean BranchMispredict();
// If the cycle count is known, the return result is the cycle count since the last branch. integer BranchRawCycleCount();
// BranchRecordAllowed() // ===================== // Returns TRUE if branch recording is allowed, FALSE otherwise. boolean BranchRecordAllowed(bits(2) el) if BRBFCR_EL1.PAUSED == '1' then return FALSE; if el == EL3 && HaveBRBEv1p1() then return (MDCR_EL3.E3BREC != MDCR_EL3.E3BREW); if HaveEL(EL3) && (MDCR_EL3.SBRBE == '00' || (IsSecure() && MDCR_EL3.SBRBE == '01')) then return FALSE; case el of when EL3 return FALSE; // FEAT_BRBEv1p1 not implemented when EL2 return BRBCR_EL2.E2BRE == '1'; when EL1 return BRBCR_EL1.E1BRE == '1'; when EL0 if EL2Enabled() && HCR_EL2.TGE == '1' then return BRBCR_EL2.E0HBRE == '1'; else return BRBCR_EL1.E0BRE == '1';
array [0..63] of BRBSRCType Records_SRC; array [0..63] of BRBTGTType Records_TGT; array [0..63] of BRBINFType Records_INF;
// FilterBranchRecord() // ==================== // Returns TRUE if the branch record is not filtered out, FALSE otherwise. boolean FilterBranchRecord(BranchType br, boolean cond) case br of when BranchType_DIRCALL return BRBFCR_EL1.DIRCALL != BRBFCR_EL1.EnI; when BranchType_INDCALL return BRBFCR_EL1.INDCALL != BRBFCR_EL1.EnI; when BranchType_RET return BRBFCR_EL1.RTN != BRBFCR_EL1.EnI; when BranchType_DIR if cond then return BRBFCR_EL1.CONDDIR != BRBFCR_EL1.EnI; else return BRBFCR_EL1.DIRECT != BRBFCR_EL1.EnI; when BranchType_INDIR return BRBFCR_EL1.INDIRECT != BRBFCR_EL1.EnI; otherwise Unreachable(); return FALSE;
// Returns TRUE if the branch being recorded is the first branch after a prohibited region, // FALSE otherwise. boolean FirstBranchAfterProhibited();
// GetBRBENumRecords() // =================== // Returns the number of branch records implemented. integer GetBRBENumRecords() assert UInt(BRBIDR0_EL1.NUMREC) IN {0x08, 0x10, 0x20, 0x40}; return integer IMPLEMENTATION_DEFINED "Number of BRB records";
// Getter functions for branch records // =================================== // Functions used by MRS instructions that access branch records BRBSRCType BRBSRC_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_SRC[record]; else return Zeros(64); BRBTGTType BRBTGT_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_TGT[record]; else return Zeros(64); BRBINFType BRBINF_EL1[integer n] assert n IN {0..31}; integer record = UInt(BRBFCR_EL1.BANK:n<4:0>); if record < GetBRBENumRecords() then return Records_INF[record]; else return Zeros(64);
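These getters address the buffer through a banked window: the index BRBFCR_EL1.BANK:n<4:0> means that system-register access n (0..31) reaches physical record BANK*32 + n, and reads beyond the implemented number of records return zero. A minimal C sketch of the same indexing, illustrative only (the function name and parameters are hypothetical):

#include <stdint.h>

// Illustrative only: the banked record index computed by BRBSRC_EL1[n],
// BRBTGT_EL1[n] and BRBINF_EL1[n].
static uint64_t brb_read_record(const uint64_t *records, unsigned num_records,
                                unsigned bank, unsigned n) {
    unsigned record = bank * 32 + (n & 0x1F);  // UInt(BANK : n<4:0>)
    return record < num_records ? records[record] : 0;  // out of range reads as zero
}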
// UpdateBranchRecordBuffer() // ========================== // Updates branch record buffer on valid records. UpdateBranchRecordBuffer(bit ccu, bits(14) cc, bit lastfailed, bit transactional, bits(6) branch_type, bits(2) el, bit mispredict, bits(2) valid, bits(64) source_address, bits(64) target_address) // Shift the Branch Records in the buffer for i = GetBRBENumRecords() - 1 downto 1 Records_SRC[i] = Records_SRC[i - 1]; Records_TGT[i] = Records_TGT[i - 1]; Records_INF[i] = Records_INF[i - 1]; Records_INF[0].CCU = ccu; Records_INF[0].CC = cc; Records_INF[0].EL = el; Records_INF[0].VALID = valid; Records_INF[0].T = transactional; Records_INF[0].LASTFAILED = lastfailed; Records_INF[0].MPRED = mispredict; Records_INF[0].TYPE = branch_type; Records_SRC[0] = source_address; Records_TGT[0] = target_address; return;
// AArch64.BreakpointMatch() // ========================= // Breakpoint matching in an AArch64 translation regime. boolean AArch64.BreakpointMatch(integer n, bits(64) vaddress, AccType acctype, integer size) assert !ELUsingAArch32(S1TranslationRegime()); assert n < NumBreakpointsImplemented(); enabled = DBGBCR_EL1[n].E == '1'; ispriv = PSTATE.EL != EL0; linked = DBGBCR_EL1[n].BT == '0x01'; isbreakpnt = TRUE; linked_to = FALSE; state_match = AArch64.StateMatch(DBGBCR_EL1[n].SSC, DBGBCR_EL1[n].HMC, DBGBCR_EL1[n].PMC, linked, DBGBCR_EL1[n].LBN, isbreakpnt, acctype, ispriv); value_match = AArch64.BreakpointValueMatch(n, vaddress, linked_to); if HaveAArch32() && size == 4 then // Check second halfword // If the breakpoint address and BAS of an Address breakpoint match the address of the // second halfword of an instruction, but not the address of the first halfword, it is // CONSTRAINED UNPREDICTABLE whether or not this breakpoint generates a Breakpoint debug // event. match_i = AArch64.BreakpointValueMatch(n, vaddress + 2, linked_to); if !value_match && match_i then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF); if vaddress<1> == '1' && DBGBCR_EL1[n].BAS == '1111' then // The above notwithstanding, if DBGBCR_EL1[n].BAS == '1111', then it is CONSTRAINED // UNPREDICTABLE whether or not a Breakpoint debug event is generated for an instruction // at the address DBGBVR_EL1[n]+2. if value_match then value_match = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF); match = value_match && state_match && enabled; return match;
// AArch64.BreakpointValueMatch() // ============================== boolean AArch64.BreakpointValueMatch(integer n, bits(64) vaddress, boolean linked_to) // "n" is the identity of the breakpoint unit to match against. // "vaddress" is the current instruction address, ignored if linked_to is TRUE and for Context // matching breakpoints. // "linked_to" is TRUE if this is a call from StateMatch for linking. // If this is a non-existent breakpoint, then it is CONSTRAINED UNPREDICTABLE whether this gives // no match or the breakpoint is mapped to another UNKNOWN implemented breakpoint. if n >= NumBreakpointsImplemented() then (c, n) = ConstrainUnpredictableInteger(0, NumBreakpointsImplemented() - 1, Unpredictable_BPNOTIMPL); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return FALSE; // If this breakpoint is not enabled, it cannot generate a match. (This could also happen on a // call from StateMatch for linking). if DBGBCR_EL1[n].E == '0' then return FALSE; context_aware = (n >= (NumBreakpointsImplemented() - NumContextAwareBreakpointsImplemented())); // If BT is set to a reserved type, behaves either as disabled or as a not-reserved type. dbgtype = DBGBCR_EL1[n].BT; if ((dbgtype IN {'011x','11xx'} && !HaveVirtHostExt() && !HaveV82Debug()) || // Context matching dbgtype == '010x' || // Reserved (dbgtype != '0x0x' && !context_aware) || // Context matching (dbgtype == '1xxx' && !HaveEL(EL2))) then // EL2 extension (c, dbgtype) = ConstrainUnpredictableBits(Unpredictable_RESBPTYPE); assert c IN {Constraint_DISABLED, Constraint_UNKNOWN}; if c == Constraint_DISABLED then return FALSE; // Otherwise the value returned by ConstrainUnpredictableBits must be a not-reserved value // Determine what to compare against. match_addr = (dbgtype == '0x0x'); match_vmid = (dbgtype == '10xx'); match_cid = (dbgtype == '001x'); match_cid1 = (dbgtype IN { '101x', 'x11x'}); match_cid2 = (dbgtype == '11xx'); linked = (dbgtype == 'xxx1'); // If this is a call from StateMatch, return FALSE if the breakpoint is not programmed for a // VMID and/or context ID match, or if not context-aware. The above assertions mean that the // code can just test for match_addr == TRUE to confirm all these things. if linked_to && (!linked || match_addr) then return FALSE; // If called from BreakpointMatch return FALSE for Linked context ID and/or VMID matches. if !linked_to && linked && !match_addr then return FALSE; // Do the comparison. if match_addr then byte = UInt(vaddress<1:0>); if HaveAArch32() then // T32 instructions can be executed at EL0 in an AArch64 translation regime. assert byte IN {0,2}; // "vaddress" is halfword aligned byte_select_match = (DBGBCR_EL1[n].BAS<byte> == '1'); else assert byte == 0; // "vaddress" is word aligned byte_select_match = TRUE; // DBGBCR_EL1[n].BAS<byte> is RES1 // If the DBGxVR<n>_EL1.RESS field bits are not a sign extension of the MSB // of DBGBVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be // included in the match. // If 'vaddress' is outside of the current virtual address space, then the access // generates a Translation fault.
integer top = AArch64.VAMax(); if !IsOnes(DBGBVR_EL1[n]<63:top>) && !IsZero(DBGBVR_EL1[n]<63:top>) then if ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS) then top = 63; BVR_match = (vaddress<top:2> == DBGBVR_EL1[n]<top:2>) && byte_select_match; elsif match_cid then if IsInHost() then BVR_match = (CONTEXTIDR_EL2<31:0> == DBGBVR_EL1[n]<31:0>); else BVR_match = (PSTATE.EL IN {EL0, EL1} && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>); elsif match_cid1 then BVR_match = (PSTATE.EL IN {EL0, EL1} && !IsInHost() && CONTEXTIDR_EL1<31:0> == DBGBVR_EL1[n]<31:0>); if match_vmid then if !Have16bitVMID() || VTCR_EL2.VS == '0' then vmid = ZeroExtend(VTTBR_EL2.VMID<7:0>, 16); bvr_vmid = ZeroExtend(DBGBVR_EL1[n]<39:32>, 16); else vmid = VTTBR_EL2.VMID; bvr_vmid = DBGBVR_EL1[n]<47:32>; BXVR_match = (PSTATE.EL IN {EL0, EL1} && EL2Enabled() && !IsInHost() && vmid == bvr_vmid); elsif match_cid2 then BXVR_match = (PSTATE.EL != EL3 && (HaveVirtHostExt() || HaveV82Debug()) && EL2Enabled() && DBGBVR_EL1[n]<63:32> == CONTEXTIDR_EL2<31:0>); bvr_match_valid = (match_addr || match_cid || match_cid1); bxvr_match_valid = (match_vmid || match_cid2); match = (!bxvr_match_valid || BXVR_match) && (!bvr_match_valid || BVR_match); return match;
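The RESS check above tests that bits [63:top] of the value register are a sign extension of the VA, i.e. all zeros or all ones; anything else makes it UNPREDICTABLE whether those bits take part in the comparison. A small C sketch of the same test, illustrative only (the function name is hypothetical):

#include <stdint.h>
#include <stdbool.h>

// Illustrative only: returns true when bits [63:top] of the breakpoint value
// register are all-zeros or all-ones, i.e. a valid sign extension of the VA.
static bool ress_is_sign_extension(uint64_t bvr, unsigned top) {
    uint64_t high = bvr >> top;             // bits [63:top]
    uint64_t all  = UINT64_MAX >> top;      // mask covering those bits
    return high == 0 || high == all;
}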
// AArch64.StateMatch() // ==================== // Determine whether a breakpoint or watchpoint is enabled in the current mode and state. boolean AArch64.StateMatch(bits(2) SSC, bit HMC, bits(2) PxC, boolean linked, bits(4) LBN, boolean isbreakpnt, AccType acctype, boolean ispriv) // "SSC", "HMC", "PxC" are the control fields from the DBGBCR[n] or DBGWCR[n] register. // "linked" is TRUE if this is a linked breakpoint/watchpoint type. // "LBN" is the linked breakpoint number from the DBGBCR[n] or DBGWCR[n] register. // "isbreakpnt" is TRUE for breakpoints, FALSE for watchpoints. // "ispriv" is valid for watchpoints, and selects between privileged and unprivileged accesses. // If parameters are set to a reserved type, behaves as either disabled or a defined type (c, SSC, HMC, PxC) = CheckValidStateMatch(SSC, HMC, PxC, isbreakpnt); if c == Constraint_DISABLED then return FALSE; // Otherwise the HMC,SSC,PxC values are either valid or the values returned by // CheckValidStateMatch are valid. EL3_match = HaveEL(EL3) && HMC == '1' && SSC<0> == '0'; EL2_match = HaveEL(EL2) && ((HMC == '1' && (SSC:PxC != '1000')) || SSC == '11'); EL1_match = PxC<0> == '1'; EL0_match = PxC<1> == '1'; if HaveNV2Ext() && acctype == AccType_NV2REGISTER && !isbreakpnt then priv_match = EL2_match; elsif !ispriv && !isbreakpnt then priv_match = EL0_match; else case PSTATE.EL of when EL3 priv_match = EL3_match; when EL2 priv_match = EL2_match; when EL1 priv_match = EL1_match; when EL0 priv_match = EL0_match; case SSC of when '00' security_state_match = TRUE; // Both when '01' security_state_match = !IsSecure(); // Non-secure only when '10' security_state_match = IsSecure(); // Secure only when '11' security_state_match = (HMC == '1' || IsSecure()); // HMC=1 -> Both, 0 -> Secure only if linked then // "LBN" must be an enabled context-aware breakpoint unit. If it is not context-aware then // it is CONSTRAINED UNPREDICTABLE whether this gives no match, or LBN is mapped to some // UNKNOWN breakpoint that is context-aware. lbn = UInt(LBN); first_ctx_cmp = NumBreakpointsImplemented() - NumContextAwareBreakpointsImplemented(); last_ctx_cmp = NumBreakpointsImplemented() - 1; if (lbn < first_ctx_cmp || lbn > last_ctx_cmp) then (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return FALSE; // Disabled when Constraint_NONE linked = FALSE; // No linking // Otherwise ConstrainUnpredictableInteger returned a context-aware breakpoint if linked then vaddress = bits(64) UNKNOWN; linked_to = TRUE; linked_match = AArch64.BreakpointValueMatch(lbn, vaddress, linked_to); return priv_match && security_state_match && (!linked || linked_match);
// AArch64.GenerateDebugExceptions() // ================================= boolean AArch64.GenerateDebugExceptions() return AArch64.GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure(), PSTATE.D);
// AArch64.GenerateDebugExceptionsFrom() // ===================================== boolean AArch64.GenerateDebugExceptionsFrom(bits(2) from, boolean secure, bit mask) if OSLSR_EL1.OSLK == '1' || DoubleLockStatus() || Halted() then return FALSE; route_to_el2 = HaveEL(EL2) && (!secure || IsSecureEL2Enabled()) && (HCR_EL2.TGE == '1' || MDCR_EL2.TDE == '1'); target = (if route_to_el2 then EL2 else EL1); if HaveEL(EL3) && secure then enabled = MDCR_EL3.SDD == '0'; if from == EL0 && ELUsingAArch32(EL1) then enabled = enabled || SDER32_EL3.SUIDEN == '1'; else enabled = TRUE; if from == target then enabled = enabled && MDSCR_EL1.KDE == '1' && mask == '0'; else enabled = enabled && UInt(target) > UInt(from); return enabled;
// AArch64.CheckForPMUOverflow() // ============================= // Signal Performance Monitors overflow IRQ and CTI overflow events AArch64.CheckForPMUOverflow() pmuirq = PMCR_EL0.E == '1' && PMINTENSET_EL1.C == '1' && PMOVSSET_EL0.C == '1'; for idx = 0 to GetNumEventCounters() - 1 E = if AArch64.PMUCounterIsHyp(idx) then MDCR_EL2.HPME else PMCR_EL0.E; if E == '1' && PMINTENSET_EL1<idx> == '1' && PMOVSSET_EL0<idx> == '1' then pmuirq = TRUE; SetInterruptRequestLevel(InterruptID_PMUIRQ, if pmuirq then HIGH else LOW); CTI_SetEventLevel(CrossTriggerIn_PMUOverflow, if pmuirq then HIGH else LOW); // The request remains set until the condition is cleared. (For example, an interrupt handler // or cross-triggered event handler clears the overflow status flag by writing to PMOVSCLR_EL0.)
// AArch64.ClearEventCounters() // ============================ // Zero all the event counters. AArch64.ClearEventCounters() for idx = 0 to AArch64.GetNumEventCountersAccessible() - 1 PMEVCNTR_EL0[idx] = Zeros();
// AArch64.CountPMUEvents() // ======================== // Return TRUE if counter "idx" should count its event. For the cycle counter, idx == CYCLE_COUNTER_ID. boolean AArch64.CountPMUEvents(integer idx) assert idx == CYCLE_COUNTER_ID || idx < GetNumEventCounters(); // Event counting is disabled in Debug state debug = Halted(); // Software can reserve some counters for EL2 resvd_for_el2 = AArch64.PMUCounterIsHyp(idx); // Main enable controls if idx == CYCLE_COUNTER_ID then enabled = PMCR_EL0.E == '1' && PMCNTENSET_EL0.C == '1'; else E = if resvd_for_el2 then MDCR_EL2.HPME else PMCR_EL0.E; enabled = E == '1' && PMCNTENSET_EL0<idx> == '1'; // Event counting is allowed unless it is prohibited by any rule below prohibited = FALSE; // Event counting in Secure state is prohibited if all of: // * EL3 is implemented // * MDCR_EL3.SPME == 0, and either: // - FEAT_PMUv3p7 is not implemented // - MDCR_EL3.MPMX == 0 if HaveEL(EL3) && IsSecure() then if HavePMUv3p7() then prohibited = MDCR_EL3.<SPME,MPMX> == '00'; else prohibited = MDCR_EL3.SPME == '0'; // Event counting at EL3 is prohibited if all of: // * FEAT_PMUv3p7 is implemented // * One of the following is true: // - MDCR_EL3.SPME == 0 // - PMNx is not reserved for EL2 // * MDCR_EL3.MPMX == 1 if !prohibited && PSTATE.EL == EL3 && HavePMUv3p7() then prohibited = MDCR_EL3.MPMX == '1' && (MDCR_EL3.SPME == '0' || !resvd_for_el2); // Event counting at EL2 is prohibited if all of: // * The HPMD Extension is implemented // * PMNx is not reserved for EL2 // * MDCR_EL2.HPMD == 1 if !prohibited && PSTATE.EL == EL2 && HaveHPMDExt() && !resvd_for_el2 then prohibited = MDCR_EL2.HPMD == '1'; // The IMPLEMENTATION DEFINED authentication interface might override software if prohibited && !HaveNoSecurePMUDisableOverride() then prohibited = !ExternalSecureNoninvasiveDebugEnabled(); // PMCR_EL0.DP disables the cycle counter when event counting is prohibited if prohibited && idx == CYCLE_COUNTER_ID then enabled = enabled && (PMCR_EL0.DP == '0'); prohibited = FALSE; // Otherwise whether event counting is prohibited does not affect the cycle counter // If FEAT_PMUv3p5 is implemented, cycle counting can be prohibited. // This is not overridden by PMCR_EL0.DP. if HavePMUv3p5() && idx == CYCLE_COUNTER_ID then if HaveEL(EL3) && IsSecure() && MDCR_EL3.SCCD == '1' then prohibited = TRUE; if PSTATE.EL == EL2 && MDCR_EL2.HCCD == '1' then prohibited = TRUE; // If FEAT_PMUv3p7 is implemented, cycle counting can be prohibited at EL3. // This is not overridden by PMCR_EL0.DP.
if HavePMUv3p7() && idx == CYCLE_COUNTER_ID then if PSTATE.EL == EL3 && MDCR_EL3.MCCD == '1' then prohibited = TRUE; // Event counting might be frozen frozen = FALSE; // If FEAT_PMUv3p7 is implemented, event counting can be frozen if HavePMUv3p7() && idx != CYCLE_COUNTER_ID then ovflws = ZeroExtend(PMOVSSET_EL0<GetNumEventCounters()-1:0>); if resvd_for_el2 then FZ = MDCR_EL2.HPMFZO; ovflws<UInt(MDCR_EL2.HPMN)-1:0> = Zeros(); else FZ = PMCR_EL0.FZO; if HaveEL(EL2) && UInt(MDCR_EL2.HPMN) < GetNumEventCounters() then ovflws<GetNumEventCounters()-1:UInt(MDCR_EL2.HPMN)> = Zeros(); frozen = FZ == '1' && !IsZero(ovflws); // Event counting can be filtered by the {P, U, NSK, NSU, NSH, M, SH} bits filter = if idx == CYCLE_COUNTER_ID then PMCCFILTR_EL0<31:0> else PMEVTYPER_EL0[idx]<31:0>; P = filter<31>; U = filter<30>; NSK = if HaveEL(EL3) then filter<29> else '0'; NSU = if HaveEL(EL3) then filter<28> else '0'; NSH = if HaveEL(EL2) then filter<27> else '0'; M = if HaveEL(EL3) then filter<26> else '0'; SH = if HaveEL(EL3) && HaveSecureEL2Ext() then filter<24> else '0'; case PSTATE.EL of when EL0 filtered = if IsSecure() then U == '1' else U != NSU; when EL1 filtered = if IsSecure() then P == '1' else P != NSK; when EL2 filtered = if IsSecure() then NSH == SH else NSH == '0'; when EL3 filtered = M != P; return !debug && enabled && !prohibited && !filtered && !frozen;
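The filter decision at the end of AArch64.CountPMUEvents reduces to one comparison per Exception level. The following C sketch is illustrative only (the function name and parameters are hypothetical) and omits the SH bit and Secure EL2 for brevity:

#include <stdbool.h>

// Illustrative only: the {P, U, NSK, NSU, NSH, M} filter decision from
// AArch64.CountPMUEvents for an implementation with EL2 and EL3.
static bool event_is_filtered(unsigned el, bool secure,
                              bool p, bool u, bool nsk, bool nsu,
                              bool nsh, bool m) {
    switch (el) {
    case 0: return secure ? u : (u != nsu);   // EL0: U alone, or U against NSU
    case 1: return secure ? p : (p != nsk);   // EL1: P alone, or P against NSK
    case 2: return !nsh;                      // Non-secure EL2 (SH/Secure EL2 omitted)
    case 3: return m != p;                    // EL3: M against P
    default: return true;
    }
}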
// AArch64.GetNumEventCountersAccessible() // ======================================= // Return the number of event counters that can be accessed at the current Exception level. integer AArch64.GetNumEventCountersAccessible() // Software can reserve some counters for EL2 if PSTATE.EL IN {EL1, EL0} && EL2Enabled() then n = UInt(MDCR_EL2.HPMN); else n = GetNumEventCounters(); return n;
// AArch64.IncrementEventCounter() // =============================== // Increment the specified event counter by the specified amount. AArch64.IncrementEventCounter(integer idx, integer increment) old_value = UInt(PMEVCNTR_EL0[idx]); new_value = old_value + increment; if HavePMUv3p5() then PMEVCNTR_EL0[idx] = new_value<63:0>; lp = if AArch64.PMUCounterIsHyp(idx) then MDCR_EL2.HLP else PMCR_EL0.LP; ovflw = if lp == '1' then 64 else 32; else PMEVCNTR_EL0[idx] = ZeroExtend(new_value<31:0>); ovflw = 32; if old_value<64:ovflw> != new_value<64:ovflw> then PMOVSSET_EL0<idx> = '1'; PMOVSCLR_EL0<idx> = '1'; // Check for the CHAIN event from an even counter if idx<0> == '0' && idx + 1 < GetNumEventCounters() && (!HavePMUv3p5() || lp == '0') then AArch64.PMUEvent(PMU_EVENT_CHAIN, 1, idx + 1);
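The overflow test in AArch64.IncrementEventCounter compares the bits at and above the counter width (ovflw = 32 or 64) before and after the increment. An equivalent C sketch, illustrative only (the function name is hypothetical; the spec works on the infinite-precision sum, which C must reconstruct via the carry):

#include <stdint.h>
#include <stdbool.h>

// Illustrative only: returns true when incrementing a counter of width
// "ovflw" (32 or 64) changes any bit at or above position ovflw of the sum.
static bool counter_overflows(uint64_t old_value, uint64_t increment,
                              unsigned ovflw) {
    uint64_t new_value = old_value + increment;
    bool carry_out = new_value < old_value;   // bit 64 of the true sum
    if (ovflw == 64)
        return carry_out;
    return carry_out || (new_value >> ovflw) != (old_value >> ovflw);
}

For example, counter_overflows(0xFFFFFFFF, 1, 32) is true (the 32-bit counter wraps), while counter_overflows(0xFFFFFFFF, 1, 64) is false.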
// AArch64.PMUCounterIsHyp() // ========================= // Returns TRUE if a counter is reserved for use by EL2, FALSE otherwise. boolean AArch64.PMUCounterIsHyp(integer n) // Software can reserve some counters for EL2 if HaveEL(EL2) then resvd_for_el2 = n >= UInt(MDCR_EL2.HPMN) && n != CYCLE_COUNTER_ID; if !HaveFeatHPMN0() && MDCR_EL2.HPMN == '00000' then resvd_for_el2 = boolean UNKNOWN; else resvd_for_el2 = FALSE; return resvd_for_el2;
// AArch64.PMUCycle() // ================== AArch64.PMUCycle() if !HavePMUv3() || !AArch64.CountPMUEvents(CYCLE_COUNTER_ID) then return; if HaveAArch32() && PMCR_EL0.LC == '0' && PMCR_EL0.D == '1' && !HasElapsed64Cycles() then PMUEvent(PMU_EVENT_CPU_CYCLES); return; old_value = UInt(PMCCNTR_EL0); new_value = old_value + 1; PMCCNTR_EL0 = new_value<63:0>; if HaveAArch32() then ovflw = if PMCR_EL0.LC == '1' then 64 else 32; else ovflw = 64; if old_value<64:ovflw> != new_value<64:ovflw> then PMOVSSET_EL0.C = '1'; PMOVSCLR_EL0.C = '1'; AArch64.CheckForPMUOverflow(); PMUEvent(PMU_EVENT_CPU_CYCLES);
// AArch64.PMUEvent() // ================== // Generate a PMU Event. All the event counters are checked for the event. // If any of the counters overflow then an interrupt is raised. AArch64.PMUEvent(bits(16) event, integer increment) if !HavePMUv3() then return; // Count the event for idx = 0 to GetNumEventCounters() - 1 if PMEVTYPER_EL0[idx].evtCount == event && AArch64.CountPMUEvents(idx) then AArch64.IncrementEventCounter(idx, increment); AArch64.CheckForPMUOverflow(); // AArch64.PMUEvent() // ================== // Generate a PMU Event for a specific event counter. AArch64.PMUEvent(bits(16) event, integer increment, integer idx) if !HavePMUv3() then return; // Count the event if PMEVTYPER_EL0[idx].evtCount == event && AArch64.CountPMUEvents(idx) then AArch64.IncrementEventCounter(idx, increment); // This function is only called from other functions which will check for overflow later
// AArch64.PMUSwIncrement() // ======================== // Generate PMU Events on a write to PMSWINC_EL0. AArch64.PMUSwIncrement(bits(32) sw_incr) for idx = 0 to AArch64.GetNumEventCountersAccessible() - 1 if sw_incr<idx> == '1' then AArch64.PMUEvent(PMU_EVENT_SW_INCR, 1, idx); AArch64.CheckForPMUOverflow();
// CollectContextIDR1() // ==================== boolean CollectContextIDR1() if !StatisticalProfilingEnabled() then return FALSE; if PSTATE.EL == EL2 then return FALSE; if EL2Enabled() && HCR_EL2.TGE == '1' then return FALSE; return PMSCR_EL1.CX == '1';
// CollectContextIDR2() // ==================== boolean CollectContextIDR2() if !StatisticalProfilingEnabled() then return FALSE; if !EL2Enabled() then return FALSE; return PMSCR_EL2.CX == '1';
// CollectPhysicalAddress() // ======================== boolean CollectPhysicalAddress() if !StatisticalProfilingEnabled() then return FALSE; (secure, el) = ProfilingBufferOwner(); if ((!secure && HaveEL(EL2)) || IsSecureEL2Enabled()) then return PMSCR_EL2.PA == '1' && (el == EL2 || PMSCR_EL1.PA == '1'); else return PMSCR_EL1.PA == '1';
// CollectTimeStamp() // ================== TimeStamp CollectTimeStamp() if !StatisticalProfilingEnabled() then return TimeStamp_None; (-, el) = ProfilingBufferOwner(); if el == EL2 then if PMSCR_EL2.TS == '0' then return TimeStamp_None; else if PMSCR_EL1.TS == '0' then return TimeStamp_None; if !HaveECVExt() then PCT_el1 = '0':PMSCR_EL1.PCT<0>; // PCT<1> is RES0 else PCT_el1 = PMSCR_EL1.PCT; if PCT_el1 == '10' then // Reserved value (-, PCT_el1) = ConstrainUnpredictableBits(Unpredictable_PMSCR_PCT); if EL2Enabled() then if !HaveECVExt() then PCT_el2 = '0':PMSCR_EL2.PCT<0>; // PCT<1> is RES0 else PCT_el2 = PMSCR_EL2.PCT; if PCT_el2 == '10' then // Reserved value (-, PCT_el2) = ConstrainUnpredictableBits(Unpredictable_PMSCR_PCT); case PCT_el2 of when '00' return TimeStamp_Virtual; when '01' if el == EL2 then return TimeStamp_Physical; when '11' assert HaveECVExt(); // FEAT_ECV must be implemented if el == EL1 && PCT_el1 == '00' then return TimeStamp_Virtual; else return TimeStamp_OffsetPhysical; otherwise Unreachable(); case PCT_el1 of when '00' return TimeStamp_Virtual; when '01' return TimeStamp_Physical; when '11' assert HaveECVExt(); // FEAT_ECV must be implemented return TimeStamp_OffsetPhysical; otherwise Unreachable();
enumeration OpType { OpType_Load, // Any memory-read operation other than atomics, compare-and-swap, and swap OpType_Store, // Any memory-write operation, including atomics without return OpType_LoadAtomic, // Atomics with return, compare-and-swap and swap OpType_Branch, // Software write to the PC OpType_Other // Any other class of operation };
// ProfilingBufferEnabled() // ======================== boolean ProfilingBufferEnabled() if !HaveStatisticalProfiling() then return FALSE; (secure, el) = ProfilingBufferOwner(); non_secure_bit = if secure then '0' else '1'; return (!ELUsingAArch32(el) && non_secure_bit == SCR_EL3.NS && PMBLIMITR_EL1.E == '1' && PMBSR_EL1.S == '0');
// ProfilingBufferOwner() // ====================== (boolean, bits(2)) ProfilingBufferOwner() secure = if HaveEL(EL3) then (MDCR_EL3.NSPB<1> == '0') else IsSecure(); el = if HaveEL(EL2) && (!secure || IsSecureEL2Enabled()) && MDCR_EL2.E2PB == '00' then EL2 else EL1; return (secure, el);
// Barrier to ensure that all existing profiling data has been formatted, and profiling buffer // addresses have been translated such that writes to the profiling buffer have been initiated. // A following DSB completes when writes to the profiling buffer have completed. ProfilingSynchronizationBarrier();
// SPECollectRecord() // ================== // Returns TRUE if the sampled class of instructions or operations, as // determined by PMSFCR_EL1, are recorded and FALSE otherwise. boolean SPECollectRecord(bits(64) events, integer total_latency, OpType optype) assert StatisticalProfilingEnabled(); bits(64) mask = 0xAA<63:0>; // Bits [7,5,3,1] if HaveSVE() then mask<18:17> = Ones(); // Predicate flags if HaveTME() then mask<16> = '1'; if HaveStatisticalProfilingv1p1() then mask<11> = '1'; // Alignment Flag if HaveStatisticalProfilingv1p2() then mask<6> = '1'; // Not taken flag mask<63:48> = bits(16) IMPLEMENTATION_DEFINED; mask<31:24> = bits(8) IMPLEMENTATION_DEFINED; mask<15:12> = bits(4) IMPLEMENTATION_DEFINED; // Check for UNPREDICTABLE case if (HaveStatisticalProfilingv1p2() && PMSFCR_EL1.<FnE,FE> == '11' && !IsZero(PMSEVFR_EL1 AND PMSNEVFR_EL1 AND mask)) then if ConstrainUnpredictableBool(Unpredictable_BADPMSFCR) then return FALSE; else // Filtering by event if PMSFCR_EL1.FE == '1' && !IsZero(PMSEVFR_EL1) then e = events AND mask; m = PMSEVFR_EL1 AND mask; if !IsZero(NOT(e) AND m) then return FALSE; // Filtering by inverse event if (HaveStatisticalProfilingv1p2() && PMSFCR_EL1.FnE == '1' && !IsZero(PMSNEVFR_EL1)) then e = events AND mask; m = PMSNEVFR_EL1 AND mask; if !IsZero(e AND m) then return FALSE; // Filtering by type if PMSFCR_EL1.FT == '1' && !IsZero(PMSFCR_EL1.<B,LD,ST>) then case optype of when OpType_Branch if PMSFCR_EL1.B == '0' then return FALSE; when OpType_Load if PMSFCR_EL1.LD == '0' then return FALSE; when OpType_Store if PMSFCR_EL1.ST == '0' then return FALSE; when OpType_LoadAtomic if PMSFCR_EL1.<LD,ST> == '00' then return FALSE; otherwise return FALSE; // Filtering by latency if PMSFCR_EL1.FL == '1' && !IsZero(PMSLATFR_EL1.MINLAT) then if total_latency < UInt(PMSLATFR_EL1.MINLAT) then return FALSE; // Check for UNPREDICTABLE cases if ((PMSFCR_EL1.FE == '1' && IsZero(PMSEVFR_EL1 AND mask)) || (PMSFCR_EL1.FT == '1' && IsZero(PMSFCR_EL1.<B,LD,ST>)) || (PMSFCR_EL1.FL == '1' && IsZero(PMSLATFR_EL1.MINLAT))) then return ConstrainUnpredictableBool(Unpredictable_BADPMSFCR); if (HaveStatisticalProfilingv1p2() && ((PMSFCR_EL1.FnE == '1' && IsZero(PMSNEVFR_EL1 AND mask)) || (PMSFCR_EL1.<FnE,FE> == '11' && !IsZero(PMSEVFR_EL1 AND PMSNEVFR_EL1 AND mask)))) then return ConstrainUnpredictableBool(Unpredictable_BADPMSFCR); return TRUE;
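The include and exclude event filters in SPECollectRecord both operate under the implemented-events mask: the FE filter drops a record if any selected event did not occur, and the FnE filter drops it if any inverse-selected event did occur. A C sketch of just these two checks, illustrative only (the function name and parameters are hypothetical; the UNPREDICTABLE cases are omitted):

#include <stdint.h>
#include <stdbool.h>

// Illustrative only: the FE (include) and FnE (exclude) event filters of
// SPECollectRecord. "mask" limits both filters to implemented event bits.
static bool spe_event_filters_pass(uint64_t events, uint64_t mask,
                                   bool fe, uint64_t pmsevfr,
                                   bool fne, uint64_t pmsnevfr) {
    uint64_t e = events & mask;
    if (fe && (pmsevfr & mask) != 0) {
        if (~e & (pmsevfr & mask))      // a selected event did not occur
            return false;
    }
    if (fne && (pmsnevfr & mask) != 0) {
        if (e & (pmsnevfr & mask))      // an inverse-selected event occurred
            return false;
    }
    return true;                        // record passes both event filters
}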
// StatisticalProfilingEnabled() // ============================= boolean StatisticalProfilingEnabled() if !HaveStatisticalProfiling() || UsingAArch32() || !ProfilingBufferEnabled() then return FALSE; in_host = EL2Enabled() && HCR_EL2.TGE == '1'; (secure, el) = ProfilingBufferOwner(); if UInt(el) < UInt(PSTATE.EL) || secure != IsSecure() || (in_host && el == EL1) then return FALSE; case PSTATE.EL of when EL3 Unreachable(); when EL2 spe_bit = PMSCR_EL2.E2SPE; when EL1 spe_bit = PMSCR_EL1.E1SPE; when EL0 spe_bit = (if in_host then PMSCR_EL2.E0HSPE else PMSCR_EL1.E0SPE); return spe_bit == '1';
enumeration TimeStamp { TimeStamp_None, // No timestamp TimeStamp_CoreSight, // CoreSight time (IMPLEMENTATION DEFINED) TimeStamp_Physical, // Physical counter value with no offset TimeStamp_OffsetPhysical, // Physical counter value minus CNTPOFF_EL2 TimeStamp_Virtual }; // Physical counter value minus CNTVOFF_EL2
// AArch64.TakeExceptionInDebugState() // =================================== // Take an exception in Debug state to an Exception level using AArch64. AArch64.TakeExceptionInDebugState(bits(2) target_el, ExceptionRecord exception) assert HaveEL(target_el) && !ELUsingAArch32(target_el) && UInt(target_el) >= UInt(PSTATE.EL); if HaveIESB() then sync_errors = SCTLR[target_el].IESB == '1'; if HaveDoubleFaultExt() then sync_errors = sync_errors || (SCR_EL3.<EA,NMEA> == '11' && target_el == EL3); // SCTLR[].IESB and/or SCR_EL3.NMEA (if applicable) might be ignored in Debug state. if !ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then sync_errors = FALSE; else sync_errors = FALSE; if HaveTME() && TSTATE.depth > 0 then case exception.exceptype of when Exception_SoftwareBreakpoint cause = TMFailure_DBG; when Exception_Breakpoint cause = TMFailure_DBG; when Exception_Watchpoint cause = TMFailure_DBG; when Exception_SoftwareStep cause = TMFailure_DBG; otherwise cause = TMFailure_ERR; FailTransaction(cause, FALSE); SynchronizeContext(); // If coming from AArch32 state, the top parts of the X[] registers might be set to zero from_32 = UsingAArch32(); if from_32 then AArch64.MaybeZeroRegisterUppers(); MaybeZeroSVEUppers(target_el); AArch64.ReportException(exception, target_el); PSTATE.EL = target_el; PSTATE.nRW = '0'; PSTATE.SP = '1'; SPSR[] = bits(64) UNKNOWN; ELR[] = bits(64) UNKNOWN; // PSTATE.{SS,D,A,I,F} are not observable and ignored in Debug state, so behave as if UNKNOWN. PSTATE.<SS,D,A,I,F> = bits(5) UNKNOWN; PSTATE.IL = '0'; if from_32 then // Coming from AArch32 PSTATE.IT = '00000000'; PSTATE.T = '0'; // PSTATE.J is RES0 if (HavePANExt() && (PSTATE.EL == EL1 || (PSTATE.EL == EL2 && ELIsInHost(EL0))) && SCTLR[].SPAN == '0') then PSTATE.PAN = '1'; if HaveUAOExt() then PSTATE.UAO = '0'; if HaveBTIExt() then PSTATE.BTYPE = '00'; if HaveSSBSExt() then PSTATE.SSBS = bit UNKNOWN; if HaveMTEExt() then PSTATE.TCO = '1'; DLR_EL0 = bits(64) UNKNOWN; DSPSR_EL0 = bits(64) UNKNOWN; EDSCR.ERR = '1'; UpdateEDSCRFields(); // Update EDSCR processor state flags. if sync_errors then SynchronizeErrors(); EndOfInstruction();
// AArch64.WatchpointByteMatch() // ============================= boolean AArch64.WatchpointByteMatch(integer n, AccType acctype, bits(64) vaddress) integer top = AArch64.VAMax(); bottom = if DBGWVR_EL1[n]<2> == '1' then 2 else 3; // Word or doubleword byte_select_match = (DBGWCR_EL1[n].BAS<UInt(vaddress<bottom-1:0>)> != '0'); mask = UInt(DBGWCR_EL1[n].MASK); // If DBGWCR_EL1[n].MASK is a non-zero value and DBGWCR_EL1[n].BAS is not set to '11111111', or // DBGWCR_EL1[n].BAS specifies a non-contiguous set of bytes, the behavior is CONSTRAINED // UNPREDICTABLE. if mask > 0 && !IsOnes(DBGWCR_EL1[n].BAS) then byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS); else LSB = (DBGWCR_EL1[n].BAS AND NOT(DBGWCR_EL1[n].BAS - 1)); MSB = (DBGWCR_EL1[n].BAS + LSB); if !IsZero(MSB AND (MSB - 1)) then // Not contiguous byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS); bottom = 3; // For the whole doubleword // If the address mask is set to a reserved value, the behavior is CONSTRAINED UNPREDICTABLE. if mask > 0 && mask <= 2 then (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK); assert c IN {Constraint_DISABLED, Constraint_NONE, Constraint_UNKNOWN}; case c of when Constraint_DISABLED return FALSE; // Disabled when Constraint_NONE mask = 0; // No masking // Otherwise the value returned by ConstrainUnpredictableInteger is a not-reserved value if mask > bottom then // If the DBGWVR<n>_EL1.RESS field bits are not a sign extension of the MSB // of DBGWVR<n>_EL1.VA, it is UNPREDICTABLE whether they appear to be // included in the match. if !IsOnes(DBGWVR_EL1[n]<63:top>) && !IsZero(DBGWVR_EL1[n]<63:top>) then if ConstrainUnpredictableBool(Unpredictable_DBGxVR_RESS) then top = 63; WVR_match = (vaddress<top:mask> == DBGWVR_EL1[n]<top:mask>); // If masked bits of DBGWVR_EL1[n] are not zero, the behavior is CONSTRAINED UNPREDICTABLE. if WVR_match && !IsZero(DBGWVR_EL1[n]<mask-1:bottom>) then WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS); else WVR_match = vaddress<top:bottom> == DBGWVR_EL1[n]<top:bottom>; return WVR_match && byte_select_match;
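The LSB/MSB manipulation above is a standard bit trick for detecting a contiguous run of ones in BAS: isolating the lowest set bit and adding it to BAS collapses a contiguous run into a single bit, which the power-of-two test then confirms. A C sketch, illustrative only (the function name is hypothetical):

#include <stdint.h>
#include <stdbool.h>

// Illustrative only: the BAS contiguity test from AArch64.WatchpointByteMatch.
static bool bas_is_contiguous(uint8_t bas) {
    uint8_t lsb = bas & (uint8_t)(0u - bas);   // lowest set bit of BAS
    uint8_t msb = (uint8_t)(bas + lsb);        // one above the run, if contiguous
    return (msb & (uint8_t)(msb - 1)) == 0;    // power of two (or zero) => contiguous
}

For example, BAS = 0b00111100 gives lsb = 0b100, msb = 0b01000000, a power of two, so it is contiguous; BAS = 0b00000101 gives msb = 0b00000110, which fails the test.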
// AArch64.WatchpointMatch() // ========================= // Watchpoint matching in an AArch64 translation regime. boolean AArch64.WatchpointMatch(integer n, bits(64) vaddress, integer size, boolean ispriv, AccType acctype, boolean iswrite) assert !ELUsingAArch32(S1TranslationRegime()); assert n < NumWatchpointsImplemented(); // "ispriv" is: // * FALSE for all loads, stores, and atomic operations executed at EL0. // * FALSE if the access is unprivileged. // * TRUE for all other loads, stores, and atomic operations. enabled = DBGWCR_EL1[n].E == '1'; linked = DBGWCR_EL1[n].WT == '1'; isbreakpnt = FALSE; state_match = AArch64.StateMatch(DBGWCR_EL1[n].SSC, DBGWCR_EL1[n].HMC, DBGWCR_EL1[n].PAC, linked, DBGWCR_EL1[n].LBN, isbreakpnt, acctype, ispriv); ls_match = FALSE; if acctype == AccType_ATOMICRW then ls_match = (DBGWCR_EL1[n].LSC != '00'); else ls_match = (DBGWCR_EL1[n].LSC<(if iswrite then 1 else 0)> == '1'); value_match = FALSE; for byte = 0 to size - 1 value_match = value_match || AArch64.WatchpointByteMatch(n, acctype, vaddress + byte); return value_match && state_match && ls_match && enabled;
// AArch64.Abort() // =============== // Abort and Debug exception handling in an AArch64 translation regime. AArch64.Abort(bits(64) vaddress, FaultRecord fault) if IsDebugException(fault) then if fault.acctype == AccType_IFETCH then if UsingAArch32() && fault.debugmoe == DebugException_VectorCatch then AArch64.VectorCatchException(fault); else AArch64.BreakpointException(fault); else AArch64.WatchpointException(vaddress, fault); elsif fault.gpcf.gpf != GPCF_None && ReportAsGPCException(fault) then TakeGPCException(vaddress, fault); elsif fault.acctype == AccType_IFETCH then AArch64.InstructionAbort(vaddress, fault); else AArch64.DataAbort(vaddress, fault);
// AArch64.AbortSyndrome() // ======================= // Creates an exception syndrome record for Abort and Watchpoint exceptions // from an AArch64 translation regime. ExceptionRecord AArch64.AbortSyndrome(Exception exceptype, FaultRecord fault, bits(64) vaddress) exception = ExceptionSyndrome(exceptype); d_side = exceptype IN {Exception_DataAbort, Exception_NV2DataAbort, Exception_Watchpoint, Exception_NV2Watchpoint}; (exception.syndrome, exception.syndrome2) = AArch64.FaultSyndrome(d_side, fault); exception.vaddress = ZeroExtend(vaddress); if IPAValid(fault) then exception.ipavalid = TRUE; exception.NS = if fault.ipaddress.paspace == PAS_NonSecure then '1' else '0'; exception.ipaddress = fault.ipaddress.address; else exception.ipavalid = FALSE; return exception;
// AArch64.CheckPCAlignment() // ========================== AArch64.CheckPCAlignment() bits(64) pc = ThisInstrAddr(); if pc<1:0> != '00' then