49 FORCE_INLINE void SetWritable(SPageInfo& aPageInfo) |
61 FORCE_INLINE void SetWritable(SPageInfo& aPageInfo) |
50 { |
62 { |
51 if (!aPageInfo.IsDirty()) |
63 if (!aPageInfo.IsDirty()) |
52 {// This is the first mapping to write to the page so increase the |
64 {// This is the first mapping to write to the page so increase the |
53 // dirty page count. |
65 // dirty page count. |
54 aPageInfo.SetWritable(); |
|
55 iNumberOfDirtyPages++; |
66 iNumberOfDirtyPages++; |
56 } |
67 } |
|
68 aPageInfo.SetWritable(); |
57 } |
69 } |
58 |
70 |
59 FORCE_INLINE void SetClean(SPageInfo& aPageInfo) |
71 FORCE_INLINE void SetClean(SPageInfo& aPageInfo) |
60 { |
72 { |
61 __NK_ASSERT_DEBUG(iNumberOfDirtyPages); |
73 __NK_ASSERT_DEBUG(iNumberOfDirtyPages); |
232 TBool ReservePages(TUint aRequiredCount, TUint& aCount); |
244 TBool ReservePages(TUint aRequiredCount, TUint& aCount); |
233 |
245 |
234 /** |
246 /** |
235 */ |
247 */ |
236 void UnreservePages(TUint& aCount); |
248 void UnreservePages(TUint& aCount); |
|
249 |
|
250 /** |
|
251 Indicates whether there are any dirty pages available to be cleaned by #CleanSomePages. |
|
252 |
|
253 This is called by the page cleaner to work out whether it has any work to do. |
|
254 |
|
255 @return Whether there are any dirty pages in the oldest section of the live list. |
|
256 */ |
|
257 TBool HasPagesToClean(); |
|
258 |
|
259 /** |
|
260 Attempt to clean one or more dirty pages in one go. |
|
261 |
|
262 Called by the page cleaner to clean pages and by PageInAllocPage when needs to steal a page from |
|
263 the live list, but the oldest clean list is empty. |
|
264 |
|
265 May or may not succeed in actually cleaning any pages. |
|
266 |
|
267 @param aBackground Whether the activity should be ignored when determining whether the paging |
|
268 device is busy. This is used by the page cleaner. |
|
269 |
|
270 @return The number of pages this method attempted to clean. If it returns zero, there were no |
|
271 pages eligible to be cleaned. |
|
272 */ |
|
273 TInt CleanSomePages(TBool aBackground); |
237 |
274 |
238 /** |
275 /** |
239 Enumeration of instrumented paging events which only require the |
276 Enumeration of instrumented paging events which only require the |
240 SPageInfo object as an argument. |
277 SPageInfo object as an argument. |
241 */ |
278 */ |
345 @post MmuLock left unchanged. |
382 @post MmuLock left unchanged. |
346 */ |
383 */ |
347 void RemovePage(SPageInfo* aPageInfo); |
384 void RemovePage(SPageInfo* aPageInfo); |
348 |
385 |
349 /** |
386 /** |
|
387 Try to remove the oldest page from the live page list and perform #StealPage. |
|
388 |
|
389 @param aPageInfoOut Set to the SPageInfo pointer for the stolen page if any. |
|
390 |
|
391 @return KErrNone on success, KErrInUse if stealing failed or 1 to indicate that the oldest page |
|
392 was dirty and the PageCleaning mutex was not held. |
|
393 |
|
394 @pre MmuLock held |
|
395 @post MmuLock left unchanged. |
|
396 */ |
|
397 TInt TryStealOldestPage(SPageInfo*& aPageInfoOut); |
|
398 |
|
399 /** |
350 Remove the oldest page from the live page list and perform #StealPage. |
400 Remove the oldest page from the live page list and perform #StealPage. |
351 |
401 |
352 @pre MmuLock held |
402 @pre MmuLock held |
353 @post MmuLock left unchanged. |
403 @post MmuLock held (but may have been released by this function) |
354 */ |
404 */ |
355 SPageInfo* StealOldestPage(); |
405 SPageInfo* StealOldestPage(); |
356 |
406 |
357 /** |
407 /** |
358 Steal a page from the memory object (if any) which is using the page. |
408 Steal a page from the memory object (if any) which is using the page. |
359 If successful the returned page will be in the EUnknown state and the |
409 If successful the returned page will be in the EUnknown state and the |
360 cache state for the page is indeterminate. This is the same state as |
410 cache state for the page is indeterminate. This is the same state as |
361 if the page had been allocated by Mmu::AllocRam. |
411 if the page had been allocated by Mmu::AllocRam. |
362 |
412 |
363 @pre RamAlloc mutex held |
413 @pre RamAlloc mutex held |
|
414 @pre If the page is dirty the PageCleaning lock must be held. |
364 @pre MmuLock held |
415 @pre MmuLock held |
365 @post MmuLock held (but may have been released by this function) |
416 @post MmuLock held (but may have been released by this function) |
366 */ |
417 */ |
367 TInt StealPage(SPageInfo* aPageInfo); |
418 TInt StealPage(SPageInfo* aPageInfo); |
368 |
419 |
414 1. An unused page in the live page list. |
465 1. An unused page in the live page list. |
415 2. The systems free pool. |
466 2. The systems free pool. |
416 3. The oldest page from the live page list. |
467 3. The oldest page from the live page list. |
417 */ |
468 */ |
418 SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags); |
469 SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags); |
|
470 |
|
471 /** |
|
472 Called by CleanSomePages() to determine which pages should be cleaned. |
|
473 |
|
474 This deals with the complexity of page colouring, which means that pages can only be mapped at |
|
475 certain locations. When cleaning multiple pages at once we need to find a set of pages that we |
|
476 can map in memory sequentially. |
|
477 |
|
478 @pre MmuLock held |
|
479 |
|
480 @param aPageInfosOut Pointer to an array of SPageInfo pointers, which must be at least |
|
481 KMaxPagesToClean long. This will be filled in to indicate the pages to clean. |
|
482 |
|
483 @return The number of pages to clean. |
|
484 */ |
|
485 TInt SelectPagesToClean(SPageInfo** aPageInfosOut); |
419 |
486 |
420 /** |
487 /** |
421 If the number of young pages exceeds that specified by iYoungOldRatio then a |
488 If the number of young pages exceeds that specified by iYoungOldRatio then a |
422 single page is made 'old'. Call this after adding a new 'young' page. |
489 single page is made 'old'. Call this after adding a new 'young' page. |
423 |
490 |
509 TUint16 iYoungOldRatio; /**< Ratio of young to old pages in the live page list */ |
576 TUint16 iYoungOldRatio; /**< Ratio of young to old pages in the live page list */ |
510 SDblQue iYoungList; /**< Head of 'young' page list. */ |
577 SDblQue iYoungList; /**< Head of 'young' page list. */ |
511 TUint iYoungCount; /**< Number of young pages */ |
578 TUint iYoungCount; /**< Number of young pages */ |
512 SDblQue iOldList; /**< Head of 'old' page list. */ |
579 SDblQue iOldList; /**< Head of 'old' page list. */ |
513 TUint iOldCount; /**< Number of old pages */ |
580 TUint iOldCount; /**< Number of old pages */ |
514 #ifdef _USE_OLDEST_LISTS |
|
515 SDblQue iOldestCleanList; /**< Head of 'oldestClean' page list. */ |
581 SDblQue iOldestCleanList; /**< Head of 'oldestClean' page list. */ |
516 TUint iOldestCleanCount; /**< Number of 'oldestClean' pages */ |
582 TUint iOldestCleanCount; /**< Number of 'oldestClean' pages */ |
517 SDblQue iOldestDirtyList; /**< Head of 'oldestDirty' page list. */ |
583 SDblQue iOldestDirtyList; /**< Head of 'oldestDirty' page list. */ |
518 TUint iOldestDirtyCount; /**< Number of 'oldestDirty' pages */ |
584 TUint iOldestDirtyCount; /**< Number of 'oldestDirty' pages */ |
519 TUint16 iOldOldestRatio; /**< Ratio of old pages to oldest to clean and dirty in the live page list*/ |
585 TUint16 iOldOldestRatio; /**< Ratio of old pages to oldest to clean and dirty in the live page list*/ |
520 #endif |
|
521 TUint iNumberOfFreePages; |
586 TUint iNumberOfFreePages; |
522 TUint iNumberOfDirtyPages; /**< The total number of dirty pages in the paging cache. Protected by MmuLock */ |
587 TUint iNumberOfDirtyPages; /**< The total number of dirty pages in the paging cache. Protected by MmuLock */ |
523 TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */ |
588 TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */ |
524 TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */ |
589 TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */ |
525 TUint iReservePageCount; /**< Number of pages reserved for locking */ |
590 TUint iReservePageCount; /**< Number of pages reserved for locking */ |
527 iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */ |
592 iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */ |
528 SVMEventInfo iEventInfo; |
593 SVMEventInfo iEventInfo; |
529 |
594 |
530 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
595 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
531 public: |
596 public: |
532 void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime); |
597 void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount); |
533 void ResetBenchmarkData(TPagingBenchmark aBm); |
598 void ResetBenchmarkData(TPagingBenchmark aBm); |
|
599 void ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut); |
|
600 TSpinLock iBenchmarkLock; |
534 SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm]; |
601 SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm]; |
535 #endif //__DEMAND_PAGING_BENCHMARKS__ |
602 #endif //__DEMAND_PAGING_BENCHMARKS__ |
536 }; |
603 }; |
537 |
604 |
538 extern DPager ThePager; |
605 extern DPager ThePager; |
539 |
606 |
540 |
607 |
541 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
608 #ifdef __DEMAND_PAGING_BENCHMARKS__ |
542 |
609 |
543 #define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter() |
610 #define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter() |
544 #define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter()) |
611 #define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter(), 1) |
|
612 #define END_PAGING_BENCHMARK_N(bm, n) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter(), (n)) |
545 |
613 |
546 #else |
614 #else |
547 |
615 |
548 #define START_PAGING_BENCHMARK |
616 #define START_PAGING_BENCHMARK |
549 #define END_PAGING_BENCHMARK(bm) |
617 #define END_PAGING_BENCHMARK(bm) |
|
618 #define END_PAGING_BENCHMARK_N(bm, n) |
550 #endif // __DEMAND_PAGING_BENCHMARKS__ |
619 #endif // __DEMAND_PAGING_BENCHMARKS__ |
551 |
620 |
552 |
621 |
553 FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo) |
622 FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo) |
554 { |
623 { |
686 Multiplier for number of request objects in pool per drive that supports paging. |
755 Multiplier for number of request objects in pool per drive that supports paging. |
687 */ |
756 */ |
688 const TInt KPagingRequestsPerDevice = 2; |
757 const TInt KPagingRequestsPerDevice = 2; |
689 |
758 |
690 |
759 |
691 class DPagingRequest; |
760 class DPoolPagingRequest; |
692 class DPageReadRequest; |
761 class DPageReadRequest; |
693 class DPageWriteRequest; |
762 class DPageWriteRequest; |
694 |
763 |
695 /** |
764 /** |
696 A pool of paging requests for use by a single paging device. |
765 A pool of paging requests for use by a single paging device. |
697 */ |
766 */ |
698 class DPagingRequestPool : public DBase |
767 class DPagingRequestPool : public DBase |
699 { |
768 { |
700 public: |
769 public: |
701 DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest); |
770 DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest); |
702 DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
771 DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
703 DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
772 DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount); |
704 private: |
773 private: |
705 ~DPagingRequestPool(); |
774 ~DPagingRequestPool(); |
706 private: |
775 private: |
707 class TGroup |
776 class TGroup |
708 { |
777 { |
709 public: |
778 public: |
710 TGroup(TUint aNumRequests); |
779 TGroup(TUint aNumRequests); |
711 DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
780 DPoolPagingRequest* FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
712 DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
781 DPoolPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
713 void Signal(DPagingRequest* aRequest); |
782 void Signal(DPoolPagingRequest* aRequest); |
714 public: |
783 public: |
715 TUint iNumRequests; |
784 TUint iNumRequests; |
716 DPagingRequest** iRequests; |
785 DPoolPagingRequest** iRequests; |
717 SDblQue iFreeList; |
786 SDblQue iFreeList; |
718 }; |
787 }; |
719 TGroup iPageReadRequests; |
788 TGroup iPageReadRequests; |
720 TGroup iPageWriteRequests; |
789 DPageWriteRequest* iPageWriteRequest; |
721 |
790 |
722 friend class DPagingRequest; |
791 friend class DPoolPagingRequest; |
723 friend class DPageReadRequest; |
792 friend class DPageReadRequest; |
724 friend class DPageWriteRequest; |
793 friend class DPageWriteRequest; |
725 }; |
794 }; |
726 |
795 |
727 |
796 |
729 Resources needed to service a paging request. |
798 Resources needed to service a paging request. |
730 */ |
799 */ |
731 class DPagingRequest : public SDblQueLink |
800 class DPagingRequest : public SDblQueLink |
732 { |
801 { |
733 public: |
802 public: |
734 DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup); |
803 enum |
735 void Release(); |
804 { |
|
805 EMaxPages = 4 |
|
806 }; |
|
807 DPagingRequest(); |
|
808 TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages); |
|
809 void UnmapPages(TBool aIMBRequired); |
|
810 void SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
|
811 void SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount); |
|
812 void ResetUse(); |
|
813 TBool CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
|
814 TBool CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount); |
|
815 TBool IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
|
816 public: |
|
817 DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */ |
|
818 protected: |
|
819 Mmu::TTempMapping iTempMapping; |
|
820 private: |
|
821 // used to identify memory request is used for... |
|
822 TUint iUseRegionCount; |
|
823 DMemoryObject* iUseRegionMemory[EMaxPages]; |
|
824 TUint iUseRegionIndex[EMaxPages]; |
|
825 }; |
|
826 |
|
827 |
|
828 __ASSERT_COMPILE(DPagingRequest::EMaxPages >= KMaxPagesToClean); |
|
829 |
|
830 |
|
831 /** |
|
832 A paging request that is part of a pool of similar request objects. |
|
833 */ |
|
834 class DPoolPagingRequest : public DPagingRequest |
|
835 { |
|
836 public: |
|
837 DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup); |
|
838 void Release(); |
736 void Wait(); |
839 void Wait(); |
737 void Signal(); |
840 void Signal(); |
738 void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
841 public: |
739 TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
842 TInt iUsageCount; /**< How many threads are using or waiting for this object. */ |
740 TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount); |
|
741 TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages); |
|
742 void UnmapPages(TBool aIMBRequired); |
|
743 public: |
|
744 TThreadMessage iMessage; /**< Used by the media driver to queue requests */ |
|
745 DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */ |
|
746 TInt iUsageCount;/**< How many threads are using or waiting for this object. */ |
|
747 TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/ |
|
748 protected: |
|
749 Mmu::TTempMapping iTempMapping; |
|
750 private: |
843 private: |
751 DPagingRequestPool::TGroup& iPoolGroup; |
844 DPagingRequestPool::TGroup& iPoolGroup; |
752 // used to identify memory request is used for... |
|
753 DMemoryObject* iUseRegionMemory; |
|
754 TUint iUseRegionIndex; |
|
755 TUint iUseRegionCount; |
|
756 }; |
845 }; |
757 |
846 |
758 |
847 |
759 /** |
848 /** |
760 Resources needed to service a page in request. |
849 Resources needed to service a page in request. |
761 */ |
850 */ |
762 class DPageReadRequest : public DPagingRequest |
851 class DPageReadRequest : public DPoolPagingRequest |
763 { |
852 { |
764 public: |
853 public: |
765 inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) |
854 DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup); |
766 : DPagingRequest(aPoolGroup) |
|
767 {} |
|
768 TInt Construct(); |
855 TInt Construct(); |
769 enum |
|
770 { |
|
771 EMaxPages = 4 |
|
772 }; |
|
773 static TUint ReservedPagesRequired(); |
856 static TUint ReservedPagesRequired(); |
774 private: |
857 private: |
775 ~DPageReadRequest(); // can't delete |
858 ~DPageReadRequest(); // can't delete |
776 public: |
859 public: |
777 TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/ |
860 TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/ |
792 Resources needed to service a page out request. |
875 Resources needed to service a page out request. |
793 */ |
876 */ |
794 class DPageWriteRequest : public DPagingRequest |
877 class DPageWriteRequest : public DPagingRequest |
795 { |
878 { |
796 public: |
879 public: |
797 inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup) |
880 DPageWriteRequest(); |
798 : DPagingRequest(aPoolGroup) |
881 void Release(); |
799 {} |
|
800 TInt Construct(); |
|
801 enum |
|
802 { |
|
803 EMaxPages = 1 |
|
804 }; |
|
805 private: |
882 private: |
806 ~DPageWriteRequest(); // can't delete |
883 ~DPageWriteRequest(); // can't delete |
807 private: |
|
808 static TInt iAllocNext; |
|
809 }; |
884 }; |
810 |
885 |
811 |
886 |
|
887 /** |
|
888 Class providing access to the mutex used to protect page cleaning operations; |
|
889 this is the mutex DPager::iPageCleaningLock. |
|
890 */ |
|
891 class PageCleaningLock |
|
892 { |
|
893 public: |
|
894 /** |
|
895 Acquire the lock. |
|
896 The lock may be acquired multiple times by a thread, and will remain locked |
|
897 until #Unlock has been used enough times to balance this. |
|
898 */ |
|
899 static void Lock(); |
|
900 |
|
901 /** |
|
902 Release the lock. |
|
903 |
|
904 @pre The current thread has previously acquired the lock. |
|
905 */ |
|
906 static void Unlock(); |
|
907 |
|
908 /** |
|
909 Return true if the current thread holds the lock. |
|
910 This is used for debug checks. |
|
911 */ |
|
912 static TBool IsHeld(); |
|
913 |
|
914 /** |
|
915 Create the lock. |
|
916 Called by DPager::Init3(). |
|
917 */ |
|
918 static void Init(); |
|
919 }; |
|
920 |
|
921 |
812 #endif |
922 #endif |