	iPhysMemSyncTemp.Alloc(1);
	r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
	if(r!=KErrNone)
		Panic(EPhysMemSyncMutexCreateFailed);

#ifdef FMM_VERIFY_RAM
	VerifyRam();
#endif
	}


void Mmu::Init2FinalCommon()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
	// Reduce free memory to <2GB...
	while(FreeRamInPages()>=0x80000000/KPageSize)
		{
		TPhysAddr dummyPage;
		TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
		__NK_ASSERT_ALWAYS(r==KErrNone);
		}
	// Reduce total RAM to <2GB...
	if(TheSuperPage().iTotalRamSize<0)
		TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
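	// (The 2GB clamps above presumably keep RAM sizes representable in the signed
	// 32-bit fields used elsewhere, e.g. TheSuperPage().iTotalRamSize, which has
	// already gone negative once total RAM reaches 2GB.)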

	// Save current free RAM size - there can never be more free RAM than this
	TUint maxFreePages = FreeRamInPages();

	// ...

TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
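	// Callers must already hold the RAM allocator lock (asserted above). The zone IDs
	// identify the physical RAM zones defined by the base port for this device, and the
	// allocation below is restricted to those zones.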

	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, aAlign);
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		{
		TUint pages = MM::RoundToPageCount(aBytes);
		// ...

		TPhysAddr pagePhys = *pages++;
		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		PageFreed(pi);

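		// ThePager.PageFreed() reports how the demand paging subsystem relates to this
		// page: KErrNone means the pager has taken care of it (nothing more to do here),
		// KErrCompletion means it was a pager-controlled page that is no longer required,
		// and any other value (the default case) means the page was not pager-controlled,
		// so it is queued for freeing below.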
		switch (ThePager.PageFreed(pi))
			{
		case KErrNone:
			--aCount; // pager has dealt with this page, so one less for us
			break;
		case KErrCompletion:
			// This was a pager controlled page but it is no longer required.
			__NK_ASSERT_DEBUG(aZonePageType == EPageMovable || aZonePageType == EPageDiscard);
			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
			if (aZonePageType == EPageMovable)
				{// This page was donated to the pager so have to free it here
				// as aZonePageType is incorrect for this page but aPages may
				// contain a mixture of movable and discardable pages.
				MmuLock::Unlock();
				iRamPageAllocator->FreeRamPages(&pagePhys, 1, EPageDiscard);
				aCount--; // We've freed this page here so one less to free later
				flash = 0; // reset flash count as we released the mmulock.
				MmuLock::Lock();
				break;
				}
			// fall through..
		default:
			// Free this page..
			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
			*pagesOut++ = pagePhys; // store page address for freeing later
			}
		}
	MmuLock::Unlock();

	iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType);
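	// Note: pagesOut is assumed (from the elided start of this function) to point back
	// into aPages, so the surviving page addresses have been compacted in place and
	// aCount now counts only the pages passed on to the allocator in the call above.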

// ...

	if(K::CheckForSimulatedAllocFail())
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
		return KErrNoMemory;
		}
	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
	__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
#endif
	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, aAlign+KPageShift);
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
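		// (Bit 0 set on the pointer passed to PagesAllocated() appears to be the existing
		// convention for "this is the base address of a contiguous run rather than a
		// pointer to a page list"; the address itself is page aligned, so the bit is free.)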
	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
	return r;
	}


void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount)
	{
	// ...
		pi->SetUnused();
		}
	MmuLock::Unlock();

	iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);

#ifdef BTRACE_KERNEL_MEMORY
	if (BTrace::CheckFilter(BTrace::EKernelMemory))
		{// Only loop round each page if EKernelMemory tracing is enabled
		pages = aPages;
		pagesEnd = aPages + aCount;
		while (pages < pagesEnd)
			{
			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pages++);
			Epoc::DriverAllocdPhysRam -= KPageSize;
			}
		}
#endif
	}
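
// (The BTrace/Epoc::DriverAllocdPhysRam bookkeeping above mirrors the allocation-side
// accounting done in Mmu::SetAllocPhysRam() further down, so the EKernelMemory trace
// stream stays balanced for physically allocated RAM.)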


TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,d,%x)",aCount,aAlign,aFlags));
	TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags);
	if (r!=KErrNone)
		return r;

	// update page infos...
	SetAllocPhysRam(aPhysAddr, aCount);

	return KErrNone;
	}
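
// Illustrative only (not part of this source): device drivers normally reach
// AllocPhysicalRam()/FreePhysicalRam() through the kernel-side Epoc::AllocPhysicalRam()
// and Epoc::FreePhysicalRam() wrappers, which are assumed here to take the RAM
// allocator lock before calling into Mmu. Sketch for a single DMA-able page:
//
//		TPhysAddr phys;
//		TInt r = Epoc::AllocPhysicalRam(KPageSize, phys);	// one page, default alignment
//		if (r == KErrNone)
//			{
//			// ... hand 'phys' to the DMA-capable peripheral ...
//			Epoc::FreePhysicalRam(phys, KPageSize);
//			}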


void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount));

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
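	// MmuLock::Flash() in the loop below periodically releases and re-acquires the
	// MmuLock so that a long run of page info updates never holds the lock for more
	// than KMaxPageInfoUpdatesInOneGo iterations at a time.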
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
		__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
		pi->SetUnused();
		++pi;
		}
	MmuLock::Unlock();

	TUint bytes = aCount << KPageShift;
	iRamPageAllocator->FreePhysicalRam(aPhysAddr, bytes);

#ifdef BTRACE_KERNEL_MEMORY
	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, bytes, aPhysAddr);
	Epoc::DriverAllocdPhysRam -= bytes;
#endif
	}


TInt Mmu::FreeRamZone(TUint aZoneId)
	{
	TPhysAddr zoneBase;
	TUint zonePages;
	TInt r = iRamPageAllocator->GetZoneAddress(aZoneId, zoneBase, zonePages);
	if (r != KErrNone)
		return r;
	FreePhysicalRam(zoneBase, zonePages);
	return KErrNone;
	}


TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
	aPhysAddr &= ~KPageMask;
	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr, aCount << KPageShift);
	if(r != KErrNone)
		return r;

	AllocatedPhysicalRam(aPhysAddr, aCount, aFlags);
	return KErrNone;
	}


void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,d,%x)",aPhysAddr,aCount,aFlags));

	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);

	// update page infos...
	SetAllocPhysRam(aPhysAddr, aCount);
	}


void Mmu::SetAllocPhysRam(TPhysAddr aPhysAddr, TUint aCount)
	{
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash, KMaxPageInfoUpdatesInOneGo);
		pi->SetPhysAlloc();
		++pi;
		}
	MmuLock::Unlock();

#ifdef BTRACE_KERNEL_MEMORY
	TUint bytes = aCount << KPageShift;
	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, bytes, aPhysAddr);
	Epoc::DriverAllocdPhysRam += bytes;
#endif
	}


void Mmu::SetAllocPhysRam(TPhysAddr* aPageList, TUint aNumPages)
	{
	TPhysAddr* page = aPageList;
	TPhysAddr* pageEnd = aPageList + aNumPages;
	TUint flash = 0;
	MmuLock::Lock();
	while (page < pageEnd)
		{
		MmuLock::Flash(flash, KMaxPageInfoUpdatesInOneGo / 2);
		TPhysAddr pagePhys = *page++;
		__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
		SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
		}
	MmuLock::Unlock();

#ifdef BTRACE_KERNEL_MEMORY
	if (BTrace::CheckFilter(BTrace::EKernelMemory))
		{// Only loop round each page if EKernelMemory tracing is enabled
		TPhysAddr* pAddr = aPageList;
		TPhysAddr* pAddrEnd = aPageList + aNumPages;
		while (pAddr < pAddrEnd)
			{
			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
			Epoc::DriverAllocdPhysRam += KPageSize;
			}
		}
#endif
	}


//
// Misc
//

// ...

TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour)
	{
	__NK_ASSERT_DEBUG(iSize>=1);
	__NK_ASSERT_DEBUG(iCount==0);

	return Map(aPage, aColour, iBlankPte);
	}
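
// Usage sketch (illustrative only, not part of this source): a TTempMapping whose
// virtual space has already been reserved with Alloc() - such as iPhysMemSyncTemp
// above - can be used to touch a physical page briefly from kernel context. The
// names 'physAddr' and 'colour' are placeholders, and Unmap() is assumed to be the
// companion call on this class:
//
//		TLinAddr va = iPhysMemSyncTemp.Map(physAddr, colour);	// map one physical page
//		memclr((TAny*)va, KPageSize);							// access it via the temporary address
//		iPhysMemSyncTemp.Unmap();								// remove the mapping when done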


/**
Map a single physical page into this temporary mapping using the given page table entry (PTE) value.

@param aPage		The physical page to map.
@param aColour		The required colour for the mapping.
@param aBlankPte	The PTE value to use for mapping the page,
					with the physical address component equal to zero.

@return The linear address at which the page is mapped.
*/
TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte)
	{
	__NK_ASSERT_DEBUG(iSize>=1);
	__NK_ASSERT_DEBUG(iCount==0);
	__NK_ASSERT_DEBUG(!(aBlankPte & ~KPageMask));

	TUint colour = aColour & KPageColourMask;
	TLinAddr addr = iLinAddr + (colour << KPageShift);
	TPte* pPte = iPtePtr + colour;
	iColour = colour;

	__ASSERT_DEBUG(*pPte == KPteUnallocatedEntry, MM::Panic(MM::ETempMappingAlreadyInUse));
	*pPte = (aPage & ~KPageMask) | aBlankPte;
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage(addr | KKernelOsAsid);

	iCount = 1;
	return addr;
	}


/**
Map a number of physical pages into this temporary mapping.

Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.

@param aPages		The array of physical pages to map.
@param aCount		The number of pages to map.
@param aColour		The required colour for the first page.
					Consecutive pages will be coloured accordingly.

@return The linear address at which the first page is mapped.
*/
TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour)
	{
	__NK_ASSERT_DEBUG(iSize>=aCount);
	__NK_ASSERT_DEBUG(iCount==0);

	TUint colour = aColour&KPageColourMask;
	TLinAddr addr = iLinAddr+(colour<<KPageShift);
	TPte* pPte = iPtePtr+colour;
	iColour = colour;

	for(TUint i=0; i<aCount; ++i)
		{
		__ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
		pPte[i] = (aPages[i]&~KPageMask) | iBlankPte;
		CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]);