(* Copyright (C) 1992, Digital Equipment Corporation *)
(* All rights reserved. *)
(* See the file COPYRIGHT for a full description. *)
(* Last modified on Fri Jan 15 15:24:58 PST 1993 by jdd *)
(* modified on Sat Jun 27 15:49:16 PDT 1992 by muller *)
(* modified on Thu Feb 27 16:04:52 PST 1992 by kalsow *)

UNSAFE MODULE RTHeap EXPORTS RTHeap, RTHeapRep, RTHeapPolicy, RTWeakRef;

IMPORT Cstdlib, Ctypes, LowPerfTool, RT0, RT0u, RTHeapEvent, RTHeapDep,
       RTMain, RTMisc, RTParams, RTStack, Thread, ThreadF, Time, Unix,
       Uuio, Word;
FROM RT0 IMPORT Typecode;
FROM RTMisc IMPORT FatalError, FatalErrorI;
FROM SmallIO IMPORT stderr, PutText, PutInt, PutHexa, Flush;

(* This module implements the memory management part of the run-time system for Modula-3. Both the TRACED and UNTRACED referents are processed here. The allocator/garbage collector for the TRACED referents is an adaptation of the algorithm presented in the WRL Research Report 88/2, ``Compacting Garbage Collection with Ambiguous Roots'', by Joel F. Bartlett; see this report for a detailed presentation. John DeTreville modified it to be VM-synchronized. The allocator/garbage collector for the UNTRACED referents is simply malloc/free. *)

(* ------------------------------- low-level allocation and collection *)

(* We assume that references (values of the types ADDRESS and REFANY) are the addresses of addressable locations and that locations with successive addresses are contiguous (that is, if a points to an n-location referent then these n locations are at addresses a, a+1, ..., a+n-1).

The memory is viewed as a collection of pages. Each page has a number that identifies it, based on the addresses that are part of this page: page p contains the addresses p * BytesPerPage to (p+1) * BytesPerPage - 1. The page size must be a multiple of the header size (see below). Given our conventions about page boundaries, this implies that the first location of a page is properly aligned for a Header. *)

(* The array desc and the global variables p0 and p1 describe the pages that are part of the traced heap. Either p0 and p1 are equal to Nil and no pages are allocated; or both are valid pages and page p is allocated iff

|    p0 <= p < p1
|    AND desc[p - p0] != Unallocated

NUMBER (desc) must be equal to p1 - p0 if there are allocated pages. Index i in desc corresponds to page i + p0; that is, p0 is the number of the first page described by desc, and every allocated page must be in [p0 .. p1). *)

(* We keep the number of allocated pages in a global variable; it should satisfy the invariant:

|    allocatedPages = sigma (i = p0, p1-1, space [i - p0] # Unallocated)
|                     if there are allocated pages,
|                   = 0 otherwise.

We also keep the number of active pages in a global; it satisfies:

|    activePages = sigma (i = p0, p1-1, space [i - p0] = nextSpace)
|                  if there are allocated pages,
|                = 0 otherwise. *)

(* Each referent is immediately preceded by a header that describes the type of the referent. In the user world, this header is not visible; that is, a REFANY is the address of the referent, not the address of the header. Each referent is immediately followed by padding space so that the combined size (referent size + padding) is a multiple of the header size. Actually, the low-level routines are given a data size, which is the sum of the referent size and the padding size, and they assume this data size is a multiple of the header size. With this padding, the addresses of headers and referents will always be multiples of ADRSIZE (Header).
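As a sketch for illustration only, assuming for concreteness a Header of one addressable unit: a referent occupying three header-sized units, followed by one unit of padding, occupies five consecutive units,

|    header | referent | referent | referent | padding

and the REFANY denoting it is the address of the first referent unit, that is, ADRSIZE (Header) past the header (see HeaderOf below).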
The combination of header/referent/padding space is called a "heap object". The size of a heap object is the size of the header, plus the size of the referent, plus the size of the padding. The alignment of a heap object is the greatest of the alignment of the header and the alignment of the referent.

We make the following assumptions:

- the alignment of headers is such that the addressable location following any properly aligned header is properly aligned for the type ADDRESS; and, for every referent:

|    referent adrSize + padding adrSize >= ADRSIZE (ADDRESS)

[During the garbage collection, we move heap objects. But we need to keep the forwarding information somewhere. This condition ensures that we can store the new address of the referent in the first word of the old referent.]

- the pages are aligned more strictly than the headers (this means that the page size is a multiple of the header alignment). [We can put a header at the beginning of a page] *)

TYPE
  RefReferent = ADDRESS;
  RefHeader   = UNTRACED REF Header;

PROCEDURE HeaderOf (r: RefReferent): RefHeader =
  BEGIN
    RETURN LOOPHOLE(r - ADRSIZE(Header), RefHeader);
  END HeaderOf;

PROCEDURE TypecodeOf (r: RefReferent): Typecode =
  BEGIN
    RETURN LOOPHOLE(r - ADRSIZE(Header), RefHeader).typecode;
  END TypecodeOf;

(* If a page is allocated, it can be normal or continued. In the first case, there is a heap object just at the beginning of the page and others following. The second case occurs when a heap object was too large to fit on a page: it starts at the beginning of a normal page and overflows onto contiguous continued pages. Whatever space is left on the last continued page is never used for another object or filler. In other words, all the headers are on normal pages.

Heap objects do not need to be adjacent. Indeed, alignment constraints would make it difficult to ensure that property. Filler objects may appear before objects to align them, or after the last object on a normal page to fill the page. *)

(* We need to be able to determine the size of a referent during collection; here is a function to do just that. It must be called with a non-nil pointer to the Header of a heap object that is there (has not been moved). *)

PROCEDURE ReferentSize (h: RefHeader): CARDINAL =
  VAR
    res: INTEGER;
    tc := h.typecode;
  BEGIN
    IF tc = Fill_1_type THEN RETURN 0; END;
    IF tc = Fill_N_type THEN
      res := LOOPHOLE(h + ADRSIZE(Header), UNTRACED REF INTEGER)^;
      RETURN res - BYTESIZE(Header);
    END;
    WITH def = RT0u.types[tc]^ DO
      IF def.nDimensions = 0 THEN
        (* the typecell datasize tells the truth *)
        RETURN def.dataSize;
      END;
      (* ELSE, the referent is an open array; it has the following layout:
|        pointer to the elements (ADDRESS)
|        size 1
|        ....
|        size n
|        optional padding
|        elements
|        ....
where n is the number of open dimensions (given by the definition) and each size is the number of elements along the dimension *) VAR sizes := LOOPHOLE(h + ADRSIZE(Header) + ADRSIZE(UNTRACED REF INTEGER), (* elt pointer*) UNTRACED REF INTEGER); BEGIN res := 1; FOR i := 0 TO def.nDimensions - 1 DO res := res * sizes^; INC(sizes, ADRSIZE(INTEGER)); END; res := res * def.elementSize; END; res := RTMisc.Upper(res + def.dataSize, BYTESIZE(Header)); END; (*WITH def*) RETURN res; END ReferentSize; (* The convention about page numbering allows for a simple conversion from an address to the number of the page in which it is, as well as from a page number to the first address is contains: *) PROCEDURE ReferentToPage (r: RefReferent): Page = VAR p: INTEGER; BEGIN p := LOOPHOLE(r, INTEGER) DIV BytesPerPage; IF p < p0 OR p >= p1 OR desc[p - p0].space = Space.Unallocated THEN RETURN Nil; ELSE RETURN p; END; END ReferentToPage; PROCEDURE HeaderToPage (r: RefHeader): Page = VAR p: INTEGER; BEGIN p := LOOPHOLE(r, INTEGER) DIV BytesPerPage; IF p < p0 OR p >= p1 OR desc[p - p0].space = Space.Unallocated THEN RETURN Nil; ELSE RETURN p; END; END HeaderToPage; PROCEDURE PageToHeader (p: Page): RefHeader = BEGIN RETURN LOOPHOLE(p * BytesPerPage, RefHeader); END PageToHeader; PROCEDURE PageToAddress (p: Page): ADDRESS = BEGIN RETURN LOOPHOLE(p * BytesPerPage, ADDRESS); END PageToAddress; (* We remember where we should look for free space with the following globals: *) VAR newPtr, newBoundary: RefHeader; (* memory in [newPtr, newBoundary) is available to AllocForNew *) pureCopyPtr, pureCopyBoundary: RefHeader; (* memory in [pureCopyPtr, pureCopyBoundary) is available to AllocForCopy for pure objects (objects with no REFs) *) impureCopyPtr, impureCopyBoundary: RefHeader; (* memory in [impureCopyPtr, impureCopyBoundary) is available to AllocForCopy for impure objects (objects with REFs) *) (* To move a heap object to the new space, modifying the original reference to it *) PROCEDURE Move (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR refref := LOOPHOLE(cp, UNTRACED REF RefReferent); ref := refref^; BEGIN IF ref = NIL THEN RETURN; END; VAR p := ReferentToPage(ref); BEGIN IF p = Nil THEN RETURN; END; VAR pi := p - p0; oldHeader := HeaderOf(ref); BEGIN IF desc[pi].space # Space.Previous THEN RETURN; (* nothing to do *) END; IF desc[pi].note = Note.Frozen THEN (* if the page contains frozen refs, just promote it *) IF desc[pi].pure THEN PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := TRUE, note := Note.Frozen, gray := FALSE, protected := FALSE, continued := FALSE}); ELSE PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := FALSE, note := Note.Frozen, gray := TRUE, protected := FALSE, continued := FALSE}); desc[pi].link := impureCopyStack; impureCopyStack := p; END; ELSIF p + 1 < p1 AND desc[pi + 1].continued THEN (* if this is a large object, just promote the pages *) IF RT0u.types[oldHeader.typecode].mapProc = NIL THEN PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := TRUE, note := Note.Large, gray := FALSE, protected := FALSE, continued := FALSE}); ELSE PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := FALSE, note := Note.Large, gray := TRUE, protected := FALSE, continued := FALSE}); desc[pi].link := impureCopyStack; impureCopyStack := p; END; ELSIF oldHeader.forwarded THEN (* if already moved, just update the reference *) refref^ := 
LOOPHOLE(ref, UNTRACED REF RefReferent)^; ELSE <* ASSERT gcOff = 0 *> (* move the object *) WITH def = RT0u.types[oldHeader.typecode] DO VAR dataSize := ReferentSize(oldHeader); np : RefReferent; BEGIN IF def.mapProc # NIL THEN np := AllocForImpureCopy(dataSize, def.dataAlignment); ELSE np := AllocForPureCopy(dataSize, def.dataAlignment); END; VAR newHeader := HeaderOf(np); BEGIN RTMisc.Copy( oldHeader, newHeader, ADRSIZE(Header) + dataSize); END; IF def.nDimensions # 0 THEN (* open array: update the internal pointer *) LOOPHOLE(np, UNTRACED REF ADDRESS)^ := np + def.dataSize; END; oldHeader.forwarded := TRUE; LOOPHOLE(ref, UNTRACED REF RefReferent)^ := np; refref^ := np; END; END; END; END; END; END Move; (* Determines whether a REF has yet been moved into the new space. Follows the logic in "Move".*) PROCEDURE Moved (ref: RefReferent): BOOLEAN = BEGIN IF ref = NIL THEN RETURN TRUE; END; (* check the space *) VAR p := ReferentToPage(ref); BEGIN IF p = Nil OR desc[p - p0].space # Space.Previous THEN RETURN TRUE; END; END; (* check the forwarded bit *) IF HeaderOf(LOOPHOLE(ref, ADDRESS)).forwarded THEN RETURN TRUE; END; (* not moved *) RETURN FALSE; END Moved; (* When an allocated page is referenced by the stack, we have to move it to the next space and insert it in the list of promoted pages. In the case where the page is actually part of a group of pages for a big referent, we have to promote all these pages to the new space, but only the first one needs to be inserted in the queue, as it is the only one containing referent headers. This routine is passed to the Threads implementation. It is called for each stack, where start and stop are the addresses of the first and last word of the stack under consideration. *) PROCEDURE NoteStackLocations (start, stop: ADDRESS) = VAR fp := start; firstAllocatedAddress := PageToAddress(p0); firstNonAllocatedAddress := PageToAddress(p1); p : ADDRESS; pp : Page; BEGIN WHILE fp <= stop DO p := LOOPHOLE(fp, UNTRACED REF ADDRESS)^; IF firstAllocatedAddress <= p AND p < firstNonAllocatedAddress THEN pp := LOOPHOLE(p, INTEGER) DIV BytesPerPage; IF desc[pp - p0].space = Space.Previous THEN VAR fp := FirstPage(pp); BEGIN <* ASSERT desc[fp - p0].space = Space.Previous *> IF desc[fp - p0].pure THEN PromotePage(fp, Desc{space := Space.Current, pure := TRUE, note := Note.AmbiguousRoot, gray := FALSE, generation := copyGeneration, protected := FALSE, continued := FALSE}); ELSE PromotePage(fp, Desc{space := Space.Current, pure := FALSE, note := Note.AmbiguousRoot, gray := TRUE, generation := copyGeneration, protected := FALSE, continued := FALSE}); desc[fp - p0].link := impureCopyStack; impureCopyStack := fp; END; END; END; END; INC(fp, RTStack.PointerAlignment); END; END NoteStackLocations; PROCEDURE PromotePage (p: Page; d: Desc) = BEGIN <* ASSERT desc[p - p0].space = Space.Previous *> <* ASSERT NOT desc[p - p0].continued*> VAR n := PageCount(p); BEGIN desc[p - p0] := d; IF n > 1 THEN VAR dd := d; BEGIN dd.continued := TRUE; FOR pp := p + 1 TO p + n - 1 DO desc[pp - p0] := dd; END; END; END; IF perfOn THEN PerfChange(p, n); END; IF d.space = Space.Current THEN IF n = 1 THEN INC(smallPromotionPages, 1); ELSE INC(largePromotionPages, n); END; END; END; END PromotePage; PROCEDURE InsertFiller (start: RefHeader; n: INTEGER) = BEGIN IF n = 0 THEN (* nothing to do *) ELSIF n = ADRSIZE(Header) THEN start^ := FillHeader1; ELSIF n >= ADRSIZE(Header) + ADRSIZE(INTEGER) THEN start^ := FillHeaderN; LOOPHOLE(start + ADRSIZE(Header), UNTRACED REF INTEGER)^ := n; ELSE <* ASSERT 
FALSE *> END; END InsertFiller; CONST TracedRefTypes = RT0.RefTypeSet{RT0.RefType.Traced}; TYPE CollectorState = {Zero, One, Two, Three, Four, Five}; VAR collectorState := CollectorState.Zero; VAR threshold: CARDINAL := InitialBytes DIV 4 DIV BytesPerPage - 1; (* start a collection as soon as current space gets this big; the initial value is 64KB *) VAR partialCollection: BOOLEAN; (* whether the collection in progress is partial, involving only the newer generation *) partialCollectionNext: BOOLEAN := FALSE; (* whether the next collection should be partial *) VAR collectorOn: BOOLEAN := FALSE; VAR copyGeneration: Generation := Generation.Younger; VAR signalBackground := FALSE; (* should signal background collector thread *) signalWeak := FALSE; (* should signal weak cleaner thread *) PROCEDURE CollectEnough () = BEGIN IF collectorOn OR gcOff > 0 THEN RETURN; END; IF Behind() THEN CollectorOn(); IF incremental AND RTHeapDep.VM AND vmOff = 0 THEN REPEAT CollectSome(); UNTIL NOT Behind() ELSE WHILE collectorState = CollectorState.Zero DO CollectSome(); END; REPEAT CollectSome(); UNTIL collectorState = CollectorState.Zero; END; CollectorOff(); END; END CollectEnough; PROCEDURE Behind (): BOOLEAN = BEGIN IF collectorState = CollectorState.Zero THEN RETURN smallCopyPages + largeCopyPages + smallPromotionPages + largePromotionPages + smallNewPages + largeNewPages >= threshold; ELSE RETURN FLOAT(smallNewPages + largeNewPages) * gcRatio >= FLOAT(smallCopyPages + largeCopyPages); END; END Behind; VAR timeUsedOnEntry: REAL; (* time used when entered collector *) PROCEDURE CollectorOn () = BEGIN <* ASSERT RT0u.inCritical > 0 *> <* ASSERT NOT collectorOn *> collectorOn := TRUE; IF RTHeapDep.VM THEN timeUsedOnEntry := RTHeapDep.TimeUsed(); END; IF impureCopyPage # Nil THEN <* ASSERT desc[impureCopyPage - p0].gray *> <* ASSERT desc[impureCopyPage - p0].protected *> Unprotect(impureCopyPage); END; END CollectorOn; PROCEDURE CollectorOff () = BEGIN <* ASSERT RT0u.inCritical > 0 *> <* ASSERT collectorOn *> IF impureCopyPage # Nil THEN <* ASSERT desc[impureCopyPage - p0].gray *> <* ASSERT NOT desc[impureCopyPage - p0].protected *> Protect(impureCopyPage, readable := FALSE, writable := FALSE); END; VAR p := impureCopyStack; BEGIN WHILE p # Nil DO IF desc[p - p0].gray AND NOT desc[p - p0].protected THEN Protect(p, readable := FALSE, writable := FALSE); END; p := desc[p - p0].link; END; END; collectorOn := FALSE; IF signalBackground THEN signalBackground := FALSE; Thread.Signal(backgroundCondition); END; IF signalWeak THEN signalWeak := FALSE; Thread.Signal(weakCondition); END; IF RTHeapDep.VM THEN cycleCost := cycleCost + (RTHeapDep.TimeUsed() - timeUsedOnEntry); END; END CollectorOff; PROCEDURE CollectSome () = BEGIN CASE collectorState OF | CollectorState.Zero => CollectSomeInStateZero(); | CollectorState.One => CollectSomeInStateOne(); | CollectorState.Two => CollectSomeInStateTwo(); | CollectorState.Three => CollectSomeInStateThree(); | CollectorState.Four => CollectSomeInStateFour(); | CollectorState.Five => CollectSomeInStateFive(); END; END CollectSome; (* Start a collection *) VAR cycleCost : REAL := 0.0; (* running cost of current cycle *) cycleLength: CARDINAL := 1; (* current planned cycle length *) cycleL : CARDINAL := 0; (* length of current cycle, so far *) cycleNews : CARDINAL; (* the number of new pages this cycle *) minPrefixAvgCost: REAL; (* minimum average cost for a prefix of this cycle *) minCycleL: CARDINAL; (* the length of that prefix *) PROCEDURE CollectSomeInStateZero () = BEGIN (* 
compute some costs relative to previous collection *) INC(cycleNews, smallNewPages + largeNewPages); VAR prefixAvgCost := cycleCost / FLOAT(cycleNews); BEGIN IF prefixAvgCost < minPrefixAvgCost THEN minPrefixAvgCost := prefixAvgCost; minCycleL := cycleL; END; END; (* make generational decisions *) IF generational AND RTHeapDep.VM AND vmOff = 0 THEN copyGeneration := Generation.Older; partialCollection := partialCollectionNext AND cycleL < cycleLength; IF NOT partialCollection THEN IF minCycleL = cycleLength THEN cycleLength := cycleLength + 1; ELSE cycleLength := MAX(cycleLength - 1, 1); END; END; ELSE copyGeneration := Generation.Younger; partialCollection := FALSE; END; partialCollectionNext := TRUE; IF partialCollection THEN INC(cycleL); ELSE cycleL := 1; cycleCost := 0.0; cycleNews := 0; minPrefixAvgCost := LAST(REAL); minCycleL := 0; END; VAR m := monitorsHead; BEGIN WHILE m # NIL DO m.before(); m := m.next; END; END; IF perfOn THEN PerfBegin(); END; (* fill the rest of the current page *) InsertFiller(newPtr, newBoundary - newPtr); newPage := Nil; newStack := Nil; newPtr := NIL; newBoundary := NIL; INC(collections); (* flip spaces; newspace becomes oldspace *) FOR p := p0 TO p1 - 1 DO IF desc[p - p0].space = Space.Current THEN desc[p - p0].space := Space.Previous; IF perfOn THEN PerfChange(p, 1); END; END; END; IF perfOn THEN PerfFlip(); END; (* The 'new' nextSpace is empty *) smallNewPages := 0; largeNewPages := 0; smallCopyPages := 0; largeCopyPages := 0; smallPromotionPages := 0; largePromotionPages := 0; FOR p := p0 TO p1 - 1 DO IF desc[p - p0].space = Space.Previous AND NOT desc[p - p0].continued THEN IF desc[p - p0].generation = Generation.Older THEN IF partialCollection THEN <* ASSERT copyGeneration = Generation.Older *> IF desc[p - p0].protected THEN <* ASSERT NOT desc[p - p0].pure *> PromotePage(p, Desc{space := Space.Current, generation := copyGeneration, pure := FALSE, note := Note.OlderGeneration, gray := FALSE, protected := TRUE, continued := FALSE}); ELSE IF desc[p - p0].pure THEN PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := TRUE, note := Note.OlderGeneration, gray := FALSE, protected := FALSE, continued := FALSE}); ELSE PromotePage( p, Desc{space := Space.Current, generation := copyGeneration, pure := FALSE, note := Note.OlderGeneration, gray := TRUE, protected := FALSE, continued := FALSE}); desc[p - p0].link := impureCopyStack; impureCopyStack := p; END; END; ELSE IF desc[p - p0].protected THEN Unprotect(p); END; END; ELSE <* ASSERT NOT desc[p - p0].protected *> END; END; END; (* now nothing in the previous space is protected or in the older generation *) (* Examine the stacks for possible pointers *) ThreadF.ProcessStacks(NoteStackLocations); (* Mark the pages pointed by the ice box *) FOR i := FIRST(iceBuckets^) TO LAST(iceBuckets^) DO VAR entry := iceBuckets[i]; BEGIN WHILE entry # NIL DO VAR pp := ReferentToPage(LOOPHOLE(entry.k, RefReferent)); BEGIN IF desc[pp - p0].space # Space.Current THEN PromotePage(pp, Desc{space := Space.Previous, generation := copyGeneration, pure := desc[pp - p0].pure, note := Note.Frozen, gray := FALSE, protected := FALSE, continued := FALSE}); END; END; entry := entry.next; END; END; END; (* Examine the global variables for possible pointers *) RTMain.GlobalMapProc(Move, NIL, TracedRefTypes); IF perfOn THEN PerfPromotedRoots(); END; collectorState := CollectorState.One; IF backgroundWaiting THEN signalBackground := TRUE; END; END CollectSomeInStateZero; (* Clean gray nodes *) PROCEDURE 
CollectSomeInStateOne () = BEGIN IF NOT CopySome() THEN collectorState := CollectorState.Two; END; IF backgroundWaiting THEN signalBackground := TRUE; END; END CollectSomeInStateOne; (* Walk weakly-referenced nodes to determine order in which to do cleanup, then cleanup gray nodes. This should be broken down into parts, since it may be a lengthy operation. *) PROCEDURE CollectSomeInStateTwo () = BEGIN PreHandleWeakRefs(); collectorState := CollectorState.Three; IF backgroundWaiting THEN signalBackground := TRUE; END; END CollectSomeInStateTwo; (* Clean gray nodes *) PROCEDURE CollectSomeInStateThree () = BEGIN (* recursively copy all objects reachable from promoted objects. marks "marka" and "markb" are cleared when objects move to the new space. *) IF NOT CopySome() THEN PostHandleWeakRefs(); (* must be called with no gray objects *) signalWeak := TRUE; collectorState := CollectorState.Four; END; IF backgroundWaiting THEN signalBackground := TRUE; END; END CollectSomeInStateThree; (* Clean gray nodes *) PROCEDURE CollectSomeInStateFour () = BEGIN IF NOT CopySome() THEN collectorState := CollectorState.Five; END; IF backgroundWaiting THEN signalBackground := TRUE; END; END CollectSomeInStateFour; PROCEDURE CollectSomeInStateFive () = BEGIN (* free all oldspace pages; oldspace becomes freespace *) FOR i := 0 TO p1 - p0 - 1 DO IF desc[i].space = Space.Previous THEN desc[i].space := Space.Free; desc[i].continued := FALSE; <* ASSERT NOT desc[i].protected *> IF perfOn THEN PerfChange(p0 + i, 1); END; END; END; RebuildFreelist(); (* fill the rest of the current copy pages *) InsertFiller(pureCopyPtr, pureCopyBoundary - pureCopyPtr); InsertFiller(impureCopyPtr, impureCopyBoundary - impureCopyPtr); IF impureCopyPage # Nil THEN desc[impureCopyPage - p0].gray := FALSE; IF perfOn THEN PerfChange(impureCopyPage, 1); END; IF desc[impureCopyPage - p0].generation = Generation.Older THEN <* ASSERT desc[impureCopyPage - p0].space = Space.Current *> Protect(impureCopyPage, readable := TRUE, writable := FALSE); END; impureCopyPage := Nil; END; <* ASSERT impureCopyStack = Nil *> pureCopyPage := Nil; pureCopyStack := Nil; impureCopyPtr := NIL; impureCopyBoundary := NIL; pureCopyPtr := NIL; pureCopyBoundary := NIL; (* keep only the reachable frozen refs *) FOR i := FIRST(iceBuckets^) TO LAST(iceBuckets^) DO VAR prev : IceCube := NIL; entry := iceBuckets[i]; BEGIN WHILE entry # NIL DO IF desc[ ReferentToPage(LOOPHOLE(entry.k, RefReferent)) - p0].space # Space.Current THEN DeleteFrozenRef(i, prev); IF prev # NIL THEN entry := prev.next; ELSE entry := iceBuckets[i]; END; ELSE prev := entry; entry := entry.next; END; END; END; END; IF perfOn THEN PerfEnd(); END; VAR m := monitorsTail; BEGIN WHILE m # NIL DO m.after(); m := m.prev; END; END; IF partialCollection THEN IF smallCopyPages + largeCopyPages + smallPromotionPages + largePromotionPages >= threshold THEN partialCollectionNext := FALSE; ELSE partialCollectionNext := TRUE; END; ELSE threshold := TRUNC(FLOAT(smallCopyPages + largeCopyPages + smallPromotionPages + largePromotionPages) * (1.0 + gcRatio)); partialCollectionNext := TRUE; END; collectorState := CollectorState.Zero; END CollectSomeInStateFive; (* CopySome attempts to make progress toward cleaning the new space. It returns FALSE iff there was no more work to do. It operates by cleaning the current copy page. It may also clean some number of pages on the stack. When it returns, there is a new copy page. 
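As an illustration of how it is driven, the state machine above simply calls

|    IF NOT CopySome() THEN collectorState := CollectorState.Two; END;

(this is the body of CollectSomeInStateOne), so each call performs one increment of the copying work.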
*) PROCEDURE CopySome (): BOOLEAN = VAR originalImpureCopyPage := impureCopyPage; originalBoundary := impureCopyBoundary; cleanTo := PageToHeader(impureCopyPage); BEGIN LOOP IF cleanTo < impureCopyPtr THEN VAR ptr := impureCopyPtr; BEGIN CleanBetween(cleanTo, ptr); cleanTo := ptr; END; ELSE IF impureCopyStack = Nil THEN RETURN FALSE; END; VAR p := impureCopyStack; BEGIN impureCopyStack := desc[p - p0].link; <* ASSERT NOT desc[p - p0].pure *> IF desc[p - p0].gray THEN IF desc[p - p0].protected THEN Unprotect(p); END; CleanBetween(PageToHeader(p), PageToHeader(p + 1)); FOR i := 0 TO PageCount(p) - 1 DO desc[p + i - p0].gray := FALSE; END; IF desc[p - p0].generation = Generation.Older THEN <* ASSERT desc[p - p0].space = Space.Current *> Protect(p, readable := TRUE, writable := FALSE); END; IF perfOn THEN PerfChange(p, PageCount(p)); END; END; END; END; IF impureCopyPage # originalImpureCopyPage THEN EXIT; END; END; CleanBetween(cleanTo, originalBoundary); (* originalImpureCopyPage is now in the stack; mark it not gray *) IF originalImpureCopyPage # Nil THEN desc[originalImpureCopyPage - p0].gray := FALSE; IF desc[originalImpureCopyPage - p0].generation = Generation.Older THEN <* ASSERT desc[originalImpureCopyPage - p0].space = Space.Current *> Protect( originalImpureCopyPage, readable := TRUE, writable := FALSE); END; IF perfOn THEN PerfChange(originalImpureCopyPage, 1); END; END; RETURN TRUE; END CopySome; PROCEDURE CleanBetween (h, he: RefHeader) = BEGIN WHILE h < he DO <* ASSERT LOOPHOLE (h, INTEGER) MOD 4 = 0 *> IF h.marka THEN h.marka := FALSE; END; IF h.markb THEN h.markb := FALSE; END; VAR tc := h.typecode; BEGIN IF tc # Fill_1_type AND tc # Fill_N_type THEN <* ASSERT tc # 0 *> (* this test is enough; we'll catch the other bad values by the array index below *) VAR proc := RT0u.types[tc].mapProc; BEGIN IF proc # NIL THEN proc(Move, NIL, h + ADRSIZE(Header), TracedRefTypes); END; END; END; END; INC(h, ADRSIZE(Header) + ReferentSize(h)); END; END CleanBetween; (* We maintain a list in weakTable, starting at weakLive0, of weak refs and the objects they reference. This table is not considered a root. When HandleWeakRefs is entered, any object mentioned in that list is a candidate for cleanup. First, we determine which weakly-referenced objects with non-NIL cleanups ("WRNNC objects") are reachable from other WRNNC objects, by walking the old space. All such WRNNC objects are copied to new space, and all the objects they reference. All the weakly-referenced objects left in the old space can then be scheduled for cleanup; we move them from the list starting at weakLive0 to the list starting at weakDead0 in weakTable. A separate thread runs WeakCleaner, which does the calls to the procedures. Note that the refs in weakTable must be updated to point to new space. *) (* PreHandleWeakRefs walks the weakly-references structures in old-space, deciding on a cleanup order. *) PROCEDURE PreHandleWeakRefs () = BEGIN (* get ready to allocate on a new page (take this out!) 
*) InsertFiller(impureCopyPtr, impureCopyBoundary - impureCopyPtr); InsertFiller(pureCopyPtr, pureCopyBoundary - pureCopyPtr); (* allocate a stack on the side for walking the old space *) InitStack(); (* iterate over the weak refs to walk the old space *) VAR i := weakLive0; BEGIN WHILE i # -1 DO (* here, all old-space WRNNC objects that have already been scanned have marka set, as do all old-space objects reachable from them; all old-space WRNNC objects that were reachable from other already-scanned WRNNC objects have been promoted to the new space. *) WITH entry = weakTable[i] DO IF entry.p # NIL AND NOT Moved(entry.r) THEN (* we haven't seen this WRNNC object before *) VAR header := HeaderOf(LOOPHOLE(entry.r, ADDRESS)); BEGIN IF NOT header.marka THEN <* ASSERT NOT header.markb *> (* visit all old-space objects reachable from it; promote all other old-space WRNNC objects reachable from it; promote all old-space objects reachable from it that have "marka" set. mark all visited nodes with "markb". *) WeakWalk1(entry.r); <* ASSERT NOT header.marka *> <* ASSERT header.markb *> (* then change all "markb" to "marka" *) WeakWalk2(entry.r); <* ASSERT header.marka *> <* ASSERT NOT header.markb *> END; END; END; i := entry.next; END; END; END; END PreHandleWeakRefs; (* WeakWalk1 starts at a WRNNC object and visits all objects in old space reachable from it, using "markb" to keep from visiting them more than once. All other WRNNC objects visited are promoted, as are all objects already visited from other WRNNC objects. *) PROCEDURE WeakWalk1 (ref: RefReferent) = VAR ref0 := ref; BEGIN <* ASSERT StackEmpty() *> LOOP IF NOT Moved(ref) THEN VAR header := HeaderOf(ref); BEGIN IF header.marka THEN <* ASSERT NOT header.markb *> Move(NIL, ADR(ref), NIL, RT0.RefType.Traced); ELSIF NOT header.markb THEN IF header.weak AND ref # ref0 THEN Move(NIL, ADR(ref), NIL, RT0.RefType.Traced); ELSE header.markb := TRUE; WeakWalkToPush(header); END; END; END; END; IF StackEmpty() THEN EXIT; END; ref := PopStack(); END; END WeakWalk1; (* WeakWalk2 starts at a WRNNC objects and visits all objects in the old space that are reachable from it, changing "markb" to "marka" *) PROCEDURE WeakWalk2 (ref: RefReferent) = BEGIN <* ASSERT StackEmpty() *> LOOP IF NOT Moved(ref) THEN VAR header := HeaderOf(ref); BEGIN IF header.markb THEN header.markb := FALSE; header.marka := TRUE; WeakWalkToPush(header); END; END; END; IF StackEmpty() THEN EXIT; END; ref := PopStack(); END; END WeakWalk2; (* WeakWalkToPush walks all REFs in a WRNNC object, pushing them on the stack to be visited later. *) PROCEDURE WeakWalkToPush (header: RefHeader) = VAR tc := header.typecode; BEGIN IF tc # Fill_1_type AND tc # Fill_N_type THEN <* ASSERT tc # 0 *> VAR proc := RT0u.types[tc].mapProc; BEGIN IF proc # NIL THEN proc(PushStackProcedure, NIL, header + ADRSIZE(Header), TracedRefTypes); END; END; END; END WeakWalkToPush; PROCEDURE PostHandleWeakRefs () = BEGIN (* move to a new page (take this out!) *) InsertFiller(impureCopyPtr, impureCopyBoundary - impureCopyPtr); InsertFiller(pureCopyPtr, pureCopyBoundary - pureCopyPtr); (* iterate over all weak refs. 
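Concretely, judging from the code below: each weakTable entry carries a next index, and the live entries form a chain headed by weakLive0. An entry whose referent was not promoted is unlinked from that chain and, if it has a cleanup procedure (entry.p # NIL), pushed onto the chain headed by weakDead0 for the WeakCleaner thread, otherwise onto the free chain headed by weakFree0; entry.t.a is set to -1 so that ToRef stops succeeding. In short: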
if the object hasn't been promoted, schedule a cleanup *) VAR i := weakLive0; previous := -1; BEGIN WHILE i # -1 DO WITH entry = weakTable[i] DO IF Moved(entry.r) THEN (* no cleanup this time; note new address *) Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); previous := i; i := entry.next; ELSE (* the weak ref is dead; there are no cleanups *) VAR header := HeaderOf(LOOPHOLE(entry.r, ADDRESS)); BEGIN header.weak := FALSE; END; (* move the entry from the weakLive0 list into the weakDead0 or weakFree0 list *) VAR next := entry.next; BEGIN IF previous = -1 THEN weakLive0 := next; ELSE weakTable[previous].next := next; END; entry.t.a := -1; (* keep ToRef from succeeding *) IF entry.p # NIL THEN entry.next := weakDead0; weakDead0 := i; ELSE entry.next := weakFree0; weakFree0 := i; END; i := next; END; END; END; END; END; (* for all entries on the weakDead0 list, including those just placed there, note the new address *) VAR i := weakDead0; BEGIN WHILE i # -1 DO WITH entry = weakTable[i] DO <* ASSERT entry.t.a = -1 *> Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); i := entry.next; END; END; END; END PostHandleWeakRefs; (* The stack for walking the old space is maintained on the heap in the new space. *) VAR stack : REF ARRAY OF RefReferent; stack0, stack1, stackA: UNTRACED REF RefReferent; stackN : CARDINAL; (* InitStack allocates an initial stack of 100 elements. *) PROCEDURE InitStack () = BEGIN stackN := 100; stack := NEW(REF ARRAY OF RefReferent, stackN); stack0 := ADR(stack^[0]); stack1 := stack0 + stackN * ADRSIZE(RefReferent); stackA := stack0; END InitStack; (* PushStack pushes an object onto the stack, growing it if necessary. *) PROCEDURE PushStack (ref: RefReferent) = BEGIN IF stackA = stack1 THEN VAR newStackN := stackN * 2; newStack: REF ARRAY OF RefReferent := NEW(REF ARRAY OF RefReferent, newStackN); BEGIN SUBARRAY(newStack^, 0, stackN) := SUBARRAY(stack^, 0, stackN); stack0 := ADR(newStack^[0]); stackA := stack0 + stackN * ADRSIZE(RefReferent); stack1 := stack0 + newStackN * ADRSIZE(RefReferent); stack := newStack; stackN := newStackN; END; END; stackA^ := ref; INC(stackA, ADRSIZE(RefReferent)); END PushStack; (* PopStack pops an object off the stack. *) PROCEDURE PopStack (): RefReferent = BEGIN DEC(stackA, ADRSIZE(RefReferent)); RETURN stackA^; END PopStack; (* StackEmpty tells if the stack is empty. *) PROCEDURE StackEmpty (): BOOLEAN = BEGIN RETURN stackA = stack0; END StackEmpty; (* PushStackProcedure is called from WeakWalkToPush to push a ref found inside another object. *) PROCEDURE PushStackProcedure (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR ref := LOOPHOLE(cp, UNTRACED REF RefReferent)^; BEGIN IF ref # NIL THEN PushStack(ref); END; END PushStackProcedure; (* This procedure is exported, but I don't think anyone uses it. 
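If anything did use it, a call would presumably look like

|    a := AllocatePages(4);

(with a some ADDRESS variable), withdrawing 4 contiguous pages from the traced heap (their desc entries become Space.Unallocated and allocatedPages is decremented) and returning the address of the first one; MakeCollectible, just below, is the inverse operation, handing a block of pages back to the collector.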
*) PROCEDURE AllocatePages (nbPages: INTEGER): ADDRESS = BEGIN WITH firstPage = FindFreePages(nbPages, notAfter := SET OF Note{}) DO FOR i := firstPage TO firstPage + nbPages - 1 DO desc[i - p0].space := Space.Unallocated; END; IF perfOn THEN PerfChange(firstPage, nbPages); END; DEC(allocatedPages, nbPages); RETURN PageToAddress(firstPage); END; END AllocatePages; PROCEDURE MakeCollectible (from: ADDRESS; nbPages: INTEGER) = VAR firstNewPage, lastNewPage: Page; BEGIN firstNewPage := (LOOPHOLE(from, INTEGER) + BytesPerPage - 1) DIV BytesPerPage; lastNewPage := firstNewPage + nbPages - 1; (* update the side arrays *) desc[firstNewPage - p0] := Desc{space := Space.Current, generation := Generation.Younger, pure := FALSE, note := Note.Allocated, gray := FALSE, protected := FALSE, continued := FALSE}; FOR i := firstNewPage + 1 TO lastNewPage DO desc[i - p0] := Desc{space := Space.Current, generation := Generation.Younger, pure := FALSE, note := Note.Allocated, gray := FALSE, protected := FALSE, continued := TRUE}; END; IF perfOn THEN PerfChange(firstNewPage, nbPages); END; INC(allocatedPages, nbPages); END MakeCollectible; (* AllocForNew allocates space for a NEW. *) PROCEDURE AllocForNew (dataSize, dataAlignment: CARDINAL): RefReferent = VAR alignment : INTEGER; nextNewPtr: RefHeader; BEGIN INC(RT0u.inCritical); (* Where would this heap object end if we were to allocate at newPtr? *) VAR referentTry := newPtr + ADRSIZE(Header); BEGIN (* ---------------- see CheckTypes --------------------------------- | WITH a = RTMisc.Align (referentTry, dataAlignment) DO | alignment := a - referentTry; | nextNewPtr := LOOPHOLE (a + dataSize, RefHeader); END; ------------------------------------------------------------------ *) alignment := align[Word.And(LOOPHOLE(referentTry, INTEGER), MaxAlignMask), dataAlignment]; nextNewPtr := referentTry + (alignment + dataSize); END; (* If this is not ok, take the long route *) IF nextNewPtr > newBoundary THEN nextNewPtr := NIL; (* clear in case of GC *) VAR res := LongAlloc(dataSize, dataAlignment, currentPtr := newPtr, currentBoundary := newBoundary, currentPage := newPage, stack := newStack, allocMode := AllocMode.New, pure := FALSE); BEGIN DEC(RT0u.inCritical); RETURN res; END; END; (* Align the referent *) IF alignment # 0 THEN InsertFiller(newPtr, alignment); newPtr := newPtr + alignment; END; VAR res := LOOPHOLE(newPtr + ADRSIZE(Header), RefReferent); BEGIN newPtr := nextNewPtr; DEC(RT0u.inCritical); RETURN res; END; END AllocForNew; (* AllocForPureCopy and AllocForImpureCopy allocate space to copy an object from oldspace; they have the same logic as AllocForNew. *) PROCEDURE AllocForPureCopy (dataSize, dataAlignment: CARDINAL): RefReferent = VAR alignment : INTEGER; nextPureCopyPtr : RefHeader; res, referentTry: RefReferent; BEGIN <* ASSERT collectorOn *> INC(RT0u.inCritical); (* Where would this heap object end if we were to allocate at pureCopyPtr? 
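Judging from the commented-out CheckTypes version, the align table lookup below is presumably just a precomputed form of

|    alignment := RTMisc.Align (referentTry, dataAlignment) - referentTry

indexed by the low bits of referentTry, that is Word.And (referentTry, MaxAlignMask), and by dataAlignment.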
*) referentTry := pureCopyPtr + ADRSIZE(Header); (* ---------------- see CheckTypes --------------------------------- | WITH a = RTMisc.Align (referentTry, dataAlignment) DO | alignment := a - referentTry; | nextPureCopyPtr := LOOPHOLE (a + dataSize, RefHeader); END; ------------------------------------------------------------------ *) VAR tmp := Word.And(LOOPHOLE(referentTry, INTEGER), MaxAlignMask); BEGIN alignment := align[tmp, dataAlignment]; nextPureCopyPtr := referentTry + (alignment + dataSize); END; (* If this is not ok, take the long route *) IF nextPureCopyPtr > pureCopyBoundary THEN res := LongAlloc(dataSize, dataAlignment, currentPtr := pureCopyPtr, currentBoundary := pureCopyBoundary, currentPage := pureCopyPage, stack := pureCopyStack, allocMode := AllocMode.Copy, pure := TRUE); DEC(RT0u.inCritical); RETURN res; END; (* Align the referent *) IF alignment # 0 THEN InsertFiller(pureCopyPtr, alignment); pureCopyPtr := pureCopyPtr + alignment; END; res := LOOPHOLE(pureCopyPtr + ADRSIZE(Header), RefReferent); pureCopyPtr := nextPureCopyPtr; DEC(RT0u.inCritical); RETURN res; END AllocForPureCopy; PROCEDURE AllocForImpureCopy (dataSize, dataAlignment: CARDINAL): RefReferent = VAR alignment : INTEGER; nextImpureCopyPtr: RefHeader; res, referentTry : RefReferent; BEGIN <* ASSERT collectorOn *> INC(RT0u.inCritical); (* Where would this heap object end if we were to allocate at ImpureCopyPtr? *) referentTry := impureCopyPtr + ADRSIZE(Header); (* ---------------- see CheckTypes --------------------------------- | WITH a = RTMisc.Align (referentTry, dataAlignment) DO | alignment := a - referentTry; | nextImpureCopyPtr := LOOPHOLE (a + dataSize, RefHeader); END; ------------------------------------------------------------------ *) VAR tmp := Word.And(LOOPHOLE(referentTry, INTEGER), MaxAlignMask); BEGIN alignment := align[tmp, dataAlignment]; nextImpureCopyPtr := referentTry + (alignment + dataSize); END; (* If this is not ok, take the long route *) IF nextImpureCopyPtr > impureCopyBoundary THEN res := LongAlloc(dataSize, dataAlignment, currentPtr := impureCopyPtr, currentBoundary := impureCopyBoundary, currentPage := impureCopyPage, stack := impureCopyStack, allocMode := AllocMode.Copy, pure := FALSE); DEC(RT0u.inCritical); RETURN res; END; (* Align the referent *) IF alignment # 0 THEN InsertFiller(impureCopyPtr, alignment); impureCopyPtr := impureCopyPtr + alignment; END; res := LOOPHOLE(impureCopyPtr + ADRSIZE(Header), RefReferent); impureCopyPtr := nextImpureCopyPtr; DEC(RT0u.inCritical); RETURN res; END AllocForImpureCopy; TYPE AllocMode = {New, Copy}; PROCEDURE LongAlloc ( dataSize, dataAlignment : CARDINAL; VAR (*INOUT*) currentPtr, currentBoundary: RefHeader; VAR (*INOUT*) currentPage : Page; VAR (*INOUT*) stack : Page; allocMode : AllocMode; pure : BOOLEAN ): RefReferent = VAR nbBytes := RTMisc.Upper(ADRSIZE(Header), dataAlignment) + dataSize; nbPages := (nbBytes + AdrPerPage - 1) DIV AdrPerPage; res : RefReferent; notAfter: SET OF Note; BEGIN IF allocMode = AllocMode.New THEN CollectEnough(); notAfter := SET OF Note{Note.Copied}; ELSE notAfter := SET OF Note{Note.Allocated}; END; VAR thisPage := FindFreePages(nbPages, notAfter := notAfter); (* thisPage points to a block of nbPages contiguous, free pages; just what we need! 
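As a worked example, with illustrative numbers only: if ADRSIZE (Header) rounded up to dataAlignment is 8, dataSize is 20000, and AdrPerPage is 8192, then nbBytes = 20008 and nbPages = (20008 + 8191) DIV 8192 = 3, that is, one page that will hold the header followed by two pages marked continued.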
*) thisPtr := PageToHeader(thisPage); thisBoundary := PageToHeader(thisPage + 1); gray := allocMode = AllocMode.Copy AND NOT pure; generation : Generation; note : Note; BEGIN (* maybe we have to put a filler to align this thing *) res := RTMisc.Align(thisPtr + ADRSIZE(Header), dataAlignment); InsertFiller(thisPtr, res - ADRSIZE(Header) - thisPtr); (* allocate the object *) thisPtr := LOOPHOLE(res + dataSize, RefHeader); IF allocMode = AllocMode.New THEN generation := Generation.Younger; note := Note.Allocated; ELSE generation := copyGeneration; IF generation = Generation.Older THEN <* ASSERT gray OR pure *> END; note := Note.Copied; END; desc[thisPage - p0] := Desc{space := Space.Current, generation := generation, pure := pure, note := note, gray := gray, protected := FALSE, continued := FALSE}; IF nbPages = 1 THEN CASE allocMode OF | AllocMode.New => INC(smallNewPages); | AllocMode.Copy => INC(smallCopyPages); END; ELSE CASE allocMode OF | AllocMode.New => INC(largeNewPages, nbPages); | AllocMode.Copy => INC(largeCopyPages, nbPages); END; FOR i := 1 TO nbPages - 1 DO desc[thisPage + i - p0] := Desc{space := Space.Current, generation := generation, pure := pure, note := note, gray := gray, protected := FALSE, continued := TRUE}; END; END; IF perfOn THEN PerfChange(thisPage, nbPages); END; IF nbPages = 1 THEN IF thisBoundary - thisPtr > currentBoundary - currentPtr THEN (* more allocation space available on this page; fill and file the current page *) InsertFiller(currentPtr, currentBoundary - currentPtr); IF currentPage # Nil THEN <* ASSERT desc[currentPage - p0].space = Space.Current *> desc[currentPage - p0].link := stack; stack := currentPage; IF allocMode = AllocMode.Copy THEN <* ASSERT desc[currentPage - p0].gray OR desc[currentPage - p0].pure *> END; END; currentPtr := thisPtr; currentBoundary := thisBoundary; currentPage := thisPage; ELSE (* more allocation space available on current page; fill and file this page *) InsertFiller(thisPtr, thisBoundary - thisPtr); desc[thisPage - p0].link := stack; stack := thisPage; END; ELSE (* file this page *) desc[thisPage - p0].link := stack; stack := thisPage; END; END; RETURN res; END LongAlloc; (*--------------------------------------------------*) VAR backgroundCondition := NEW(Thread.Condition); backgroundWaiting := FALSE; (* The background thread may be present or not. If it is present, it speeds collection asynchronously. Because it makes progress slowly, it should impose only a small overhead when the mutator is running, but quickly complete a collection if the collector pauses. *) PROCEDURE BackgroundThread (<* UNUSED *> closure: Thread.Closure): REFANY = BEGIN LOOP backgroundWaiting := TRUE; (* no locks, unfortunately *) WHILE collectorState = CollectorState.Zero DO LOCK mutex DO Thread.Wait(mutex, backgroundCondition); END; END; backgroundWaiting := FALSE; WHILE collectorState # CollectorState.Zero DO INC(RT0u.inCritical); IF collectorState # CollectorState.Zero THEN CollectorOn(); CollectSome(); CollectorOff(); END; DEC(RT0u.inCritical); Time.LongPause(1); (* one second *) END; END; END BackgroundThread; (* -------------------------------------------------------- allocation *) VAR init_cache: ARRAY [0 .. 
2047] OF ADDRESS; (* initialized copies of freshly allocated objects *) PROCEDURE FastAllocate (tc: Typecode): REFANY = BEGIN RETURN Allocate(tc); END FastAllocate; CONST HdrSize = ADRSIZE(Header); PROCEDURE Allocate (tc: Typecode): REFANY = VAR def: RT0.TypeDefinition; res: RefReferent; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) def := RT0u.types[tc]; IF NOT def.traced OR def.nDimensions # 0 THEN FatalErrorI("RTHeap.Allocate: improper typecode: ", tc); END; INC(RT0u.inCritical); res := AllocForNew(def.dataSize, def.dataAlignment); IF init_cache[tc] # NIL THEN RTMisc.Copy(init_cache[tc], res - HdrSize, def.dataSize + HdrSize); ELSE LOOPHOLE(res - HdrSize, RefHeader)^ := Header{typecode := tc, forwarded := FALSE}; RTMisc.Zero(res, def.dataSize); IF def.defaultMethods # NIL THEN LOOPHOLE(res, UNTRACED REF ADDRESS)^ := def.defaultMethods; END; VAR d := def; BEGIN WHILE d # NIL DO IF d.initProc # NIL THEN d.initProc(res) END; d := d.parent; END; END; IF def.dataSize <= BYTESIZE(RT0.Typecell) THEN init_cache[tc] := Malloc(def.dataSize + HdrSize); RTMisc.Copy(res - HdrSize, init_cache[tc], HdrSize + def.dataSize); END; END; DEC(RT0u.inCritical); RETURN LOOPHOLE(res, REFANY); END Allocate; PROCEDURE AllocateUntracedRef (tc: Typecode): ADDRESS = VAR res: ADDRESS; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) WITH def = RT0u.types[tc]^ DO IF def.traced OR def.defaultMethods # NIL THEN FatalErrorI("RTHeap.AllocateUntracedRef: improper typecode: ", tc); END; res := Malloc(def.dataSize); RTMisc.Zero(res, def.dataSize); IF def.initProc # NIL THEN def.initProc(res) END; END; (*WITH def*) RETURN res; END AllocateUntracedRef; PROCEDURE AllocateUntraced (tc: Typecode): UNTRACED ROOT = VAR d : RT0.TypeDefinition; res : ADDRESS; hdrSize: INTEGER; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) d := RT0u.types[tc]; WITH def = d^ DO IF def.traced OR def.defaultMethods = NIL THEN FatalErrorI("RTHeap.AllocateUntraced: improper typecode:", tc); END; hdrSize := MAX(BYTESIZE(Header), def.dataAlignment); res := Malloc(hdrSize + def.dataSize) + hdrSize; RTMisc.Zero(res, def.dataSize); (* given res, we are not able to free the result of malloc *) LOOPHOLE(res - ADRSIZE(Header), RefHeader)^ := Header{typecode := tc, forwarded := FALSE}; IF def.defaultMethods # NIL THEN LOOPHOLE(res, UNTRACED REF ADDRESS)^ := def.defaultMethods; END; WHILE d # NIL DO IF d.initProc # NIL THEN d.initProc(res) END; d := d.parent; END; END; (*WITH def*) RETURN res; END AllocateUntraced; PROCEDURE CheckOpenArray (READONLY def: RT0.Typecell; READONLY s : ARRAY OF INTEGER): INTEGER = VAR nbElems := 1; BEGIN IF def.nDimensions = 0 THEN FatalErrorI( "RTHeap.CheckOpenArray: improper typecode: ", def.typecode); END; IF def.nDimensions # NUMBER(s) THEN FatalError(NIL, 0, "RTHeap.CheckOpenArray", ": dimensions in type and shape disagree"); END; FOR i := 0 TO def.nDimensions - 1 DO IF s[i] < 0 THEN FatalErrorI("RTHeap.CheckOpenArray: negative array size: ", s[i]); END; nbElems := s[i] * nbElems; END; RETURN nbElems; END CheckOpenArray; PROCEDURE FastAllocateOpenArray (tc: Typecode; READONLY s: ARRAY OF INTEGER): REFANY = BEGIN RETURN AllocateOpenArray(tc, s); END FastAllocateOpenArray; PROCEDURE AllocateOpenArray (tc: Typecode; READONLY s: ARRAY OF INTEGER): REFANY = VAR def : RT0.TypeDefinition; nbElems, nBytes: CARDINAL; res : RefReferent; BEGIN (* if tc is not proper, the array 
indexing will fail; that is our runtime check in this case *) def := RT0u.types[tc]; nbElems := CheckOpenArray(def^, s); nBytes := def.dataSize + nbElems * def.elementSize; INC(RT0u.inCritical); res := AllocForNew( RTMisc.Upper(nBytes, BYTESIZE(Header)), def.dataAlignment); LOOPHOLE(res - ADRSIZE(Header), RefHeader)^ := Header{typecode := tc, forwarded := FALSE}; LOOPHOLE(res, UNTRACED REF ADDRESS)^ := res + def.dataSize; FOR i := 0 TO def.nDimensions - 1 DO LOOPHOLE(res + ADRSIZE(ADDRESS) + i * ADRSIZE(INTEGER), UNTRACED REF INTEGER)^ := s[i]; END; RTMisc.Zero( res + ADRSIZE(ADDRESS) + def.nDimensions * ADRSIZE(INTEGER), nbElems * def.elementSize); WHILE def # NIL DO IF def.initProc # NIL THEN def.initProc(res) END; def := def.parent; END; DEC(RT0u.inCritical); RETURN LOOPHOLE(res, REFANY); END AllocateOpenArray; PROCEDURE AllocateUntracedOpenArrayDef ( def: RT0.TypeDefinition; READONLY s : ARRAY OF INTEGER ): ADDRESS = VAR nbElems := CheckOpenArray(def^, s); VAR res := Malloc(def.dataSize + nbElems * def.elementSize); BEGIN LOOPHOLE(res, UNTRACED REF ADDRESS)^ := res + def.dataSize; FOR i := 0 TO def.nDimensions - 1 DO LOOPHOLE(res + ADRSIZE(ADDRESS) + i * ADRSIZE(INTEGER), UNTRACED REF INTEGER)^ := s[i]; END; RTMisc.Zero( res + ADRSIZE(ADDRESS) + def.nDimensions * ADRSIZE(INTEGER), nbElems * def.elementSize); WHILE def # NIL DO IF def.initProc # NIL THEN def.initProc(res) END; def := def.parent; END; RETURN res; END AllocateUntracedOpenArrayDef; PROCEDURE AllocateUntracedOpenArray ( tc: Typecode; READONLY s : ARRAY OF INTEGER): ADDRESS = BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) RETURN AllocateUntracedOpenArrayDef(RT0u.types[tc], s); END AllocateUntracedOpenArray; PROCEDURE Duplicate (r: REFANY): REFANY = VAR def := RT0u.types[TypecodeOf(LOOPHOLE(r, ADDRESS))]; VAR header := HeaderOf(LOOPHOLE(r, ADDRESS)); VAR dataSize := ReferentSize(header); VAR nBytes := RTMisc.Upper(dataSize, BYTESIZE(Header)); VAR res := AllocForNew(nBytes, def.dataAlignment); BEGIN RTMisc.Copy(header, HeaderOf(res), ADRSIZE(Header) + dataSize); (* if it is an open array, update its internal data pointer *) IF def.nDimensions # 0 THEN LOOPHOLE(res, UNTRACED REF ADDRESS)^ := res + (LOOPHOLE(r, UNTRACED REF ADDRESS)^ - LOOPHOLE(r, ADDRESS)); END; RETURN LOOPHOLE(res, REFANY); END Duplicate; PROCEDURE Malloc (size: INTEGER): ADDRESS = VAR res: ADDRESS; BEGIN INC(RT0u.inCritical); res := Cstdlib.malloc(size); IF res = NIL THEN FatalError(NIL, 0, "malloc failed, unable to get more memory"); END; DEC(RT0u.inCritical); RETURN res; END Malloc; (* --------------------------------------------------------- collector *) PROCEDURE StartGC () = BEGIN INC(RT0u.inCritical); CollectorOn(); IF collectorState = CollectorState.Zero THEN partialCollectionNext := FALSE; REPEAT CollectSome(); UNTIL collectorState # CollectorState.Zero; IF NOT (incremental AND RTHeapDep.VM AND vmOff = 0) THEN REPEAT CollectSome(); UNTIL collectorState = CollectorState.Zero; END; END; CollectorOff(); DEC(RT0u.inCritical); END StartGC; PROCEDURE FinishGC () = BEGIN INC(RT0u.inCritical); CollectorOn(); WHILE collectorState # CollectorState.Zero DO CollectSome(); END; CollectorOff(); DEC(RT0u.inCritical); END FinishGC; PROCEDURE GCOff () = BEGIN INC(RT0u.inCritical); FinishVM(); INC(gcOff); DEC(RT0u.inCritical); IF perfOn THEN PerfAllow(); END; END GCOff; PROCEDURE GCOn () = BEGIN INC(RT0u.inCritical); DEC(gcOff); DEC(RT0u.inCritical); IF perfOn THEN PerfAllow(); END; END GCOn; PROCEDURE 
DisableCollection () = BEGIN GCOff(); END DisableCollection; PROCEDURE EnableCollection () = BEGIN GCOn(); END EnableCollection; PROCEDURE FinishVM () = BEGIN INC(RT0u.inCritical); FinishGC(); CollectorOn(); (* no gray pages now; only protected pages are in older generation *) FOR p := p0 TO p1 - 1 DO IF desc[p - p0].protected THEN Unprotect(p); END; END; CollectorOff(); DEC(RT0u.inCritical); END FinishVM; PROCEDURE VMOff () = BEGIN INC(RT0u.inCritical); FinishVM(); INC(vmOff); DEC(RT0u.inCritical); END VMOff; PROCEDURE VMOn () = BEGIN INC(RT0u.inCritical); DEC(vmOff); DEC(RT0u.inCritical); END VMOn; PROCEDURE Crash (): BOOLEAN = BEGIN IF collectorOn THEN FOR p := p0 TO p1 - 1 DO IF desc[p - p0].protected THEN Unprotect(p); END; END; RETURN FALSE; ELSE INC(RT0u.inCritical); FinishGC(); CollectorOn(); FOR p := p0 TO p1 - 1 DO IF desc[p - p0].protected THEN Unprotect(p); END; END; INC(gcOff); RETURN TRUE; END; END Crash; (* -------------------------------------------------- heap data layout *) PROCEDURE GetDataAdr (r: REFANY): ADDRESS = VAR def: RT0.TypeDefinition; BEGIN def := RT0u.types[TypecodeOf(LOOPHOLE(r, ADDRESS))]; IF def.defaultMethods # NIL THEN RETURN LOOPHOLE(r, ADDRESS) + ADRSIZE(ADDRESS); ELSIF def.nDimensions # 0 THEN RETURN LOOPHOLE(r, UNTRACED REF ADDRESS)^; ELSE RETURN LOOPHOLE(r, ADDRESS); END; END GetDataAdr; PROCEDURE GetDataSize (r: REFANY): CARDINAL = VAR def : RT0.TypeDefinition; nbElems: INTEGER; BEGIN def := RT0u.types[TypecodeOf(LOOPHOLE(r, ADDRESS))]; IF def.defaultMethods # NIL THEN RETURN def.dataSize - BYTESIZE(ADDRESS); ELSIF def.nDimensions # 0 THEN nbElems := 1; VAR sizes := LOOPHOLE(LOOPHOLE(r, ADDRESS) + ADRSIZE(ADDRESS), UNTRACED REF INTEGER); BEGIN FOR i := 0 TO def.nDimensions - 1 DO nbElems := nbElems * sizes^; INC(sizes, ADRSIZE(INTEGER)); END; END; RETURN nbElems * def.elementSize; ELSE RETURN def.dataSize; END; END GetDataSize; (* ------------------------------------------------------- open arrays *) PROCEDURE GetShape (r: REFANY; VAR nDimensions: INTEGER; VAR s: ArrayShape) = BEGIN UnsafeGetShape(r, nDimensions, s); END GetShape; PROCEDURE UnsafeGetShape ( r : REFANY; VAR nDimensions: INTEGER; VAR s: UNTRACED REF ARRAY [0 .. 
999] OF INTEGER) = BEGIN WITH def = RT0u.types[TypecodeOf(LOOPHOLE(r, ADDRESS))] DO nDimensions := def.nDimensions; IF nDimensions # 0 THEN s := LOOPHOLE(LOOPHOLE(r, ADDRESS) + ADRSIZE(ADDRESS), ArrayShape); END; END; END UnsafeGetShape; PROCEDURE GetNDimensions (tc: Typecode): CARDINAL = VAR def: RT0.TypeDefinition; BEGIN def := RT0u.types[tc]; RETURN def.nDimensions; END GetNDimensions; (* ---------------------------------------------------------- monitors *) TYPE PublicMonitorClosure = OBJECT METHODS before (); after (); END; REVEAL MonitorClosure = PublicMonitorClosure BRANDED "RTHeap.MonitorClosure" OBJECT next, prev: MonitorClosure; OVERRIDES before := Noop; after := Noop; END; VAR monitorsHead, monitorsTail: MonitorClosure; PROCEDURE RegisterMonitor (cl: MonitorClosure) = BEGIN cl.next := monitorsHead; IF monitorsHead = NIL THEN monitorsTail := cl; ELSE monitorsHead.prev := cl; END; monitorsHead := cl; END RegisterMonitor; PROCEDURE UnregisterMonitor (cl: MonitorClosure) = BEGIN IF cl = monitorsHead THEN IF cl = monitorsTail THEN monitorsHead := NIL; monitorsTail := NIL; ELSE monitorsHead := monitorsHead.next; monitorsHead.prev := NIL; END; ELSE IF cl = monitorsTail THEN monitorsTail := monitorsTail.prev; monitorsTail.next := NIL; ELSE cl.prev.next := cl.next; cl.next.prev := cl.prev; END; END; END UnregisterMonitor; PROCEDURE Noop (<*UNUSED*> cl: MonitorClosure) = BEGIN END Noop; (* ----------------------------------------------------------- objects *) PROCEDURE GetSize (r: REFANY): INTEGER = BEGIN RETURN ReferentSize(HeaderOf(LOOPHOLE(r, ADDRESS))) + BYTESIZE(Header) END GetSize; PROCEDURE DisposeTraced (VAR r: REFANY) = BEGIN r := NIL; END DisposeTraced; PROCEDURE DisposeUntraced (VAR a: ADDRESS) = BEGIN IF a # NIL THEN Cstdlib.free(a); a := NIL; END; END DisposeUntraced; PROCEDURE DisposeUntracedObj (VAR a: ADDRESS) = VAR def: RT0.TypeDefinition; BEGIN IF a # NIL THEN def := RT0u.types[TypecodeOf(a)]; a := a - MAX(BYTESIZE(Header), def.dataAlignment); Cstdlib.free(a); a := NIL; END; END DisposeUntracedObj; (* ----------------------------------------------------------- OBJECTs *) PROCEDURE SetDefaultMethods (r: ROOT) = VAR tc := TypecodeOf(LOOPHOLE(r, RefReferent)); BEGIN LOOPHOLE(r, UNTRACED REF ADDRESS)^ := RT0u.types[tc].defaultMethods; END SetDefaultMethods; (* ----------------------------------------------------------- ice box *) (* The ice box contains addresses of referents that should not be moved by the garbage collector. The main use of that feature is to support hash tables indexed by refs. Of course, the garbage collector has to know about those referents. Since they don't move, we can store them in a hash table (I know, it is funny to implement ref table to support ref table, but...) To allow multiple independant freeze of a given referent, a count is associated to each of them. The icebox is a hash table of pairs
. Each bucket is list sorted in increasing order. To make things simpler, the table is not an array of pointers to the heads of the lists, but rather an array of list elements. That way, insertions and deletion at the front of the list don't have to be treated separately. *) CONST Multiplier = -1664117991; (* = LOOPHOLE( Round( .6125423371 * 2^32 ), INTEGER ) *) MaxChain = 10; TYPE IceCube = UNTRACED REF RECORD next : IceCube; k : INTEGER; temperature: INTEGER; END; Buckets = UNTRACED REF ARRAY OF IceCube; VAR iceBoxCover := NEW(Thread.Mutex); nonEmptyBuckets := 0; iceBuckets := NEW(Buckets, 64); iceBoxLogSize := 6; iceBoxEntries := 0; PROCEDURE LookupIceBox (k: INTEGER; VAR bucket: CARDINAL; VAR prev: IceCube): BOOLEAN RAISES {} = (* look for an entry with key 'k'. If found, set 'bucket' to the bucket containing it, 'prev' to the entry before it (may be NIL if it is the first in the bucket), and return TRUE. If not found, set 'bucket' to the bucket in which it would be, 'prev' to entry that would be just before it (may be NIL if it would be the first in the bucket), and return FALSE. *) VAR entry: IceCube; BEGIN bucket := Word.Shift(k * Multiplier, iceBoxLogSize - BITSIZE(Word.T)); prev := NIL; entry := iceBuckets[bucket]; WHILE entry # NIL AND k >= entry.k DO IF k = entry.k THEN RETURN TRUE; END; prev := entry; entry := prev.next; END; RETURN entry # NIL AND k = entry.k; END LookupIceBox; PROCEDURE FreezeRef (r: REFANY) RAISES {} = VAR prev, new: IceCube; bucket : CARDINAL; BEGIN <* ASSERT r # NIL *> LOCK iceBoxCover DO IF LookupIceBox(LOOPHOLE(r, INTEGER), bucket, prev) THEN IF prev # NIL THEN DEC(prev.next.temperature); ELSE DEC(iceBuckets[bucket].temperature); END; ELSE new := NEW(IceCube, k := LOOPHOLE(r, INTEGER), temperature := -1); IF prev # NIL THEN new.next := prev.next; prev.next := new; ELSE new.next := iceBuckets[bucket]; IF new.next = NIL THEN INC(nonEmptyBuckets); END; iceBuckets[bucket] := new; END; INC(iceBoxEntries); IF (iceBoxEntries + nonEmptyBuckets - 1) DIV nonEmptyBuckets > MaxChain THEN ExpandIceBox(); END; END; END; END FreezeRef; PROCEDURE ExpandIceBox () RAISES {} = VAR oldBuckets := iceBuckets; movedEntry, nextMovedEntry, prev: IceCube; bucket : CARDINAL; BEGIN iceBuckets := NEW(Buckets, 2 * NUMBER(oldBuckets^)); INC(iceBoxLogSize); nonEmptyBuckets := 0; FOR oldI := FIRST(oldBuckets^) TO LAST(oldBuckets^) DO movedEntry := oldBuckets[oldI].next; WHILE movedEntry # NIL DO nextMovedEntry := movedEntry.next; VAR b := LookupIceBox(movedEntry.k, bucket, prev); BEGIN <* ASSERT NOT b *> END; IF prev # NIL THEN movedEntry.next := prev.next; prev.next := movedEntry; ELSE IF iceBuckets[bucket] = NIL THEN INC(nonEmptyBuckets); END; movedEntry.next := iceBuckets[bucket]; iceBuckets[bucket] := movedEntry; END; movedEntry := nextMovedEntry; END; END; DISPOSE(oldBuckets); END ExpandIceBox; PROCEDURE UnfreezeRef (r: REFANY) RAISES {} = CONST Warm = 0; VAR prev : IceCube; bucket: CARDINAL; melted: BOOLEAN; BEGIN <* ASSERT r # NIL *> LOCK iceBoxCover DO VAR b := LookupIceBox(LOOPHOLE(r, INTEGER), bucket, prev); BEGIN <* ASSERT b *> END; IF prev # NIL THEN INC(prev.next.temperature); melted := prev.next.temperature = Warm; ELSE INC(iceBuckets[bucket].temperature); melted := iceBuckets[bucket].temperature = Warm; END; IF melted THEN DeleteFrozenRef(bucket, prev); END; END; END UnfreezeRef; PROCEDURE DeleteFrozenRef (bucket: CARDINAL; prev: IceCube) RAISES {} = VAR todelete: IceCube; BEGIN IF prev # NIL THEN todelete := prev.next; prev.next := todelete.next; ELSE todelete := 
iceBuckets[bucket]; iceBuckets[bucket] := todelete.next; END; IF iceBuckets[bucket] = NIL THEN DEC(nonEmptyBuckets); END; DEC(iceBoxEntries); DISPOSE(todelete); END DeleteFrozenRef; (* --------------------------------------------------------- debugging *) VAR fillersSeen := FALSE; fillersStart: RefHeader; PROCEDURE ShowFillers (start, stop: RefHeader) = BEGIN IF NOT fillersSeen THEN RETURN; END; DEC(stop, ADRSIZE(Header)); IF start = stop THEN PutHexa(stderr, LOOPHOLE(start, INTEGER)); PutText(stderr, " - filler\n"); ELSE PutHexa(stderr, LOOPHOLE(start, INTEGER)); PutText(stderr, " - "); PutHexa(stderr, LOOPHOLE(stop, INTEGER)); PutText(stderr, " : "); PutInt(stderr, (stop - start) DIV ADRSIZE(Header) + 1); PutText(stderr, " fillers\n"); END; fillersSeen := FALSE; END ShowFillers; PROCEDURE ShowPage (page: Page; print: BOOLEAN) = (* scans page; if print is TRUE, sends to stderr the starting addresses of the objects and their typecodes *) VAR h := PageToHeader(page); p := HeaderToPage(h); def: RT0.TypeDefinition; tc : Typecode; BEGIN IF p = Nil THEN PutText(stderr, "Invalid page\n"); ELSIF desc[p - p0].continued THEN PutText(stderr, "continuation page\n"); ELSIF desc[p - p0].space # Space.Previous AND desc[p - p0].space # Space.Current THEN PutText(stderr, "page not in the old or next space\n"); ELSE WHILE HeaderToPage(h) = p AND (p # newPage OR h < newPtr) DO tc := h.typecode; IF tc = Fill_1_type OR tc = Fill_N_type THEN IF NOT fillersSeen THEN fillersSeen := TRUE; fillersStart := h; END; ELSE IF print THEN ShowFillers(fillersStart, h); PutHexa(stderr, LOOPHOLE(h, INTEGER)); def := RT0u.types[tc]; PutText(stderr, " : tc = "); PutInt(stderr, def.typecode); PutText(stderr, "\n"); END; END; INC(h, ADRSIZE(Header) + ReferentSize(h)); END; ShowFillers(fillersStart, h); END; END ShowPage; PROCEDURE GCstatus (mode: INTEGER) = (* dump the heap: mode >= 0: call ShowPage on page mode; mode = -1: show global information only; mode <= -2: also show the descriptor of every page; mode <= -3: also scan each non-continuation page in the previous or current space with ShowPage; mode = -4: print the objects on those pages as well *) VAR p, pp: Page; BEGIN IF mode >= 0 THEN ShowPage(mode, TRUE); ELSE PutText(stderr, "pureCopyStack = "); PutInt(stderr, pureCopyStack); PutText(stderr, ", impureCopyStack = "); PutInt(stderr, impureCopyStack); PutText(stderr, ", collections = "); PutInt(stderr, collections); PutText(stderr, "\n"); PutText(stderr, "allocated = "); PutInt(stderr, allocatedPages); PutText(stderr, ", active = "); PutInt(stderr, smallNewPages + smallCopyPages + smallPromotionPages); PutText(stderr, "s + "); PutInt(stderr, largeNewPages + largeCopyPages + largePromotionPages); PutText(stderr, "l\n"); IF mode < -1 THEN p := p0; WHILE p < p1 DO IF desc[p - p0].space = Space.Unallocated THEN pp := p + 1; WHILE pp < p1 AND desc[pp - p0].space = Space.Unallocated DO INC(pp); END; PutText(stderr, "-------------- pages "); PutInt(stderr, p); PutText(stderr, " to "); PutInt(stderr, pp - 1); PutText(stderr, " ---------- UNALLOCATED\n"); p := pp; ELSE IF NOT desc[p - p0].continued THEN PutText(stderr, "h page "); ELSE PutText(stderr, "c page "); END; PutInt(stderr, p); PutText(stderr, ", space = "); PutInt(stderr, ORD(desc[p - p0].space)); PutText(stderr, ", link = "); PutInt(stderr, desc[p - p0].link); PutText(stderr, "\n"); IF NOT desc[p - p0].continued AND mode < -2 AND (desc[p - p0].space = Space.Previous OR desc[p - p0].space = Space.Current) THEN ShowPage(p, mode = -4); END; INC(p); END; END; END; END; Flush(stderr); END GCstatus; PROCEDURE InstallSanityCheck () = BEGIN RegisterMonitor( NEW(MonitorClosure, before := SanityCheck, after := 
SanityCheck)); END InstallSanityCheck; (* SanityCheck checks the heap for correctness when no collection is in progress. *) PROCEDURE SanityCheck (<*UNUSED*> self: MonitorClosure) = VAR p := p0; BEGIN WHILE p < p1 DO CASE desc[p - p0].space OF | Space.Unallocated => INC(p); | Space.Previous => <* ASSERT FALSE *> | Space.Current => <* ASSERT NOT desc[p - p0].gray *> <* ASSERT NOT desc[p - p0].continued *> IF desc[p - p0].protected THEN <* ASSERT desc[p - p0].generation = Generation.Older *> END; (* visit the objects on the page *) VAR h := PageToHeader(p); he := PageToHeader(p + 1); BEGIN IF p = newPage THEN he := newPtr; END; WHILE h < he DO VAR tc := h.typecode; BEGIN <* ASSERT (0 < tc AND tc < RT0u.nTypes) OR tc = Fill_1_type OR tc = Fill_N_type *> (* check the references in the object *) IF tc # Fill_1_type AND tc # Fill_N_type THEN VAR proc := RT0u.types[tc].mapProc; BEGIN IF proc # NIL THEN IF desc[p - p0].protected THEN proc(ProtectedOlderRefSanityCheck, NIL, h + ADRSIZE(Header), TracedRefTypes); ELSE proc(RefSanityCheck, NIL, h + ADRSIZE(Header), TracedRefTypes); END; END; END; END; END; INC(h, ADRSIZE(Header) + ReferentSize(h)); END; IF h > he THEN <* ASSERT HeaderToPage(h - 1) = p + PageCount(p) - 1 *> ELSE <* ASSERT PageCount(p) = 1 *> END; END; VAR n := PageCount(p); d := desc[p - p0]; BEGIN d.continued := TRUE; d.link := Nil; LOOP INC(p); DEC(n); IF n = 0 THEN EXIT; END; VAR dd := desc[p - p0]; BEGIN dd.link := Nil; <* ASSERT dd = d *> END; END; END; | Space.Free => <* ASSERT NOT desc[p - p0].continued *> INC(p); END; END; <* ASSERT p = p1 *> END SanityCheck; PROCEDURE RefSanityCheck (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR ref := LOOPHOLE(cp, REF RefReferent)^; BEGIN IF ref # NIL THEN VAR p := ReferentToPage(ref); h := HeaderOf(ref); tc := h.typecode; BEGIN IF p0 <= p AND p < p1 THEN <* ASSERT desc[p - p0].space = Space.Current *> <* ASSERT NOT desc[p - p0].continued *> <* ASSERT (0 < tc AND tc < RT0u.nTypes) OR tc = Fill_1_type OR tc = Fill_N_type *> ELSE (* the compiler generates Text.T that are not in the traced heap *) <* ASSERT tc = 1 *> END; END; END; END RefSanityCheck; PROCEDURE ProtectedOlderRefSanityCheck (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR ref := LOOPHOLE(cp, REF RefReferent)^; BEGIN IF ref # NIL THEN VAR p := ReferentToPage(ref); h := HeaderOf(ref); tc := h.typecode; BEGIN IF p0 <= p AND p < p1 THEN <* ASSERT desc[p - p0].space = Space.Current *> <* ASSERT desc[p - p0].generation = Generation.Older *> <* ASSERT NOT desc[p - p0].continued *> <* ASSERT (0 < tc AND tc < RT0u.nTypes) OR tc = Fill_1_type OR tc = Fill_N_type *> ELSE (* the compiler generates Text.T that are not in the traced heap *) <* ASSERT tc = 1 *> END; END; END; END ProtectedOlderRefSanityCheck; (* ----------------------------------------------------------------------- *) PROCEDURE VisitAllRefs (v: RefVisitor) = VAR tc: Typecode; BEGIN TRY GCOff(); FinishGC(); FOR p := p0 TO p1 - 1 DO IF desc[p - p0].space = Space.Current AND NOT desc[p - p0].continued THEN VAR h := PageToHeader(p); he := PageToHeader(p + 1); size: INTEGER; BEGIN WHILE h < he AND (p # newPage OR h < newPtr) DO size := ReferentSize(h); tc := h.typecode; IF tc # Fill_1_type AND tc # Fill_N_type THEN IF NOT v.visit( tc, LOOPHOLE(h + ADRSIZE(Header), REFANY), size) THEN RETURN; END; END; INC(h, ADRSIZE(Header) + size); END; END; END; END; FINALLY GCOn(); END; END VisitAllRefs; TYPE CountClosure = MonitorClosure 
OBJECT tcs : REF ARRAY OF Typecode; counts : REF ARRAY OF CARDINAL; visitor: RefVisitor; OVERRIDES after := CountRefsForTypecodes; END; TYPE CountAllClosure = MonitorClosure OBJECT counts : REF ARRAY OF CARDINAL; visitor: RefVisitor; OVERRIDES after := CountRefsForAllTypecodes; END; PROCEDURE ShowCounts (READONLY tcs: ARRAY OF Typecode): MonitorClosure = VAR cl: CountClosure; BEGIN cl := NEW(CountClosure); cl.tcs := NEW(REF ARRAY OF Typecode, NUMBER(tcs)); cl.tcs^ := tcs; cl.counts := NEW(REF ARRAY OF CARDINAL, NUMBER(tcs)); cl.visitor := NEW(CountVisitor, cl := cl); RegisterMonitor(cl); RETURN cl; END ShowCounts; PROCEDURE ShowAllCounts (): MonitorClosure = VAR cl: CountAllClosure; BEGIN cl := NEW(CountAllClosure); cl.counts := NEW(REF ARRAY OF CARDINAL, RT0u.nTypes); cl.visitor := NEW(CountAllVisitor, cl := cl); RegisterMonitor(cl); RETURN cl; END ShowAllCounts; TYPE CountVisitor = RefVisitor OBJECT cl: CountClosure OVERRIDES visit := One; END; CountAllVisitor = RefVisitor OBJECT cl: CountAllClosure OVERRIDES visit := All; END; PROCEDURE One ( self: CountVisitor; tc : Typecode; <*UNUSED*> r : REFANY; <*UNUSED*> size: CARDINAL ): BOOLEAN = BEGIN FOR i := FIRST(self.cl.tcs^) TO LAST(self.cl.tcs^) DO IF self.cl.tcs[i] = tc THEN INC(self.cl.counts[i]); RETURN TRUE; END; END; RETURN TRUE; END One; PROCEDURE All ( self: CountAllVisitor; tc : Typecode; <*UNUSED*> r : REFANY; <*UNUSED*> size: CARDINAL ): BOOLEAN = BEGIN INC(self.cl.counts[tc]); RETURN TRUE; END All; PROCEDURE CountRefsForTypecodes (cl: CountClosure) = BEGIN FOR i := FIRST(cl.counts^) TO LAST(cl.counts^) DO cl.counts[i] := 0; END; VisitAllRefs(cl.visitor); FOR i := FIRST(cl.tcs^) TO LAST(cl.tcs^) DO PutText(stderr, "count["); PutInt(stderr, cl.tcs[i]); PutText(stderr, "] = "); PutInt(stderr, cl.counts[i]); IF i # LAST(cl.tcs^) THEN PutText(stderr, ", "); END; END; PutText(stderr, "\n"); END CountRefsForTypecodes; PROCEDURE CountRefsForAllTypecodes (cl: CountAllClosure) = BEGIN FOR i := FIRST(cl.counts^) TO LAST(cl.counts^) DO cl.counts[i] := 0; END; VisitAllRefs(cl.visitor); FOR i := FIRST(cl.counts^) TO LAST(cl.counts^) DO IF cl.counts[i] > 1 THEN PutInt(stderr, i); PutText(stderr, ": "); PutInt(stderr, cl.counts[i]); IF i # LAST(cl.counts^) THEN PutText(stderr, ", "); END; END; END; PutText(stderr, "\n"); END CountRefsForAllTypecodes; (* ---------------------------------------------------- initialization *) CONST MaxAlignment = 8; CONST MaxAlignMask = 2_0111; (* bit mask to capture MaxAlignment *) TYPE MaxAlignRange = [0 .. MaxAlignment - 1]; VAR align: ARRAY MaxAlignRange, [1 .. MaxAlignment] OF INTEGER; (* align[i,j] == RTMisc.Align (i, j) - i *) PROCEDURE CheckTypes () = (* called by RTType.Init after type registration, but before any allocation *) VAR is_power: ARRAY [0 .. MaxAlignment] OF BOOLEAN; size : INTEGER; BEGIN (* ensure that init_cache is large enough *) <*ASSERT RT0u.nTypes <= NUMBER (init_cache) *> (* check that it's safe to eliminate the #A call to upper ... 
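(Illustrative restatement, not an additional check: for every traced, non-open-array type the first loop below asserts
| def.dataSize = RTMisc.Upper (def.dataSize, BYTESIZE (Header))
and, once every traced dataAlignment is known to be a power of two no larger than MaxAlignment, the allocator may compute, for instance,
| RTMisc.Align (addr, 8) = addr + align [Word.And (addr, MaxAlignMask), 8]
where, say, align [5, 8] = RTMisc.Upper (5, 8) - 5 = 3.)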
*) FOR i := 0 TO RT0u.nTypes - 1 DO WITH def = RT0u.types[i]^ DO IF def.traced AND def.nDimensions = 0 THEN size := def.dataSize; <*ASSERT size = RTMisc.Upper (size, BYTESIZE (Header)) *> END; END; END; (* compute the small powers of two *) FOR i := FIRST(is_power) TO LAST(is_power) DO is_power[i] := FALSE END; is_power[1] := TRUE; is_power[2] := TRUE; is_power[4] := TRUE; is_power[8] := TRUE; <* ASSERT MaxAlignment <= 8 *> (* check that all data alignments are small powers of two so that "RTMisc.Align (addr, alignment)" can be safely replaced by "addr + align [Word.And (addr, 7), alignment]" in Gcalloc.*) FOR i := 0 TO RT0u.nTypes - 1 DO WITH def = RT0u.types[i]^ DO IF def.traced THEN <*ASSERT is_power [def.dataAlignment] *> END; END; END; (* initialize the alignment array *) FOR i := FIRST(align) TO LAST(align) DO FOR j := FIRST(align[0]) TO LAST(align[0]) DO align[i, j] := RTMisc.Upper(i, j) - i; END; END; END CheckTypes; (* ---------------------------------------------------- showheap hooks *) VAR perfR, perfW: Ctypes.int; perfOn : BOOLEAN := FALSE; CONST EventSize = (BITSIZE(RTHeapEvent.T) + BITSIZE(CHAR) - 1) DIV BITSIZE( CHAR); PROCEDURE PerfStart () = VAR i, j: Page; BEGIN IF LowPerfTool.ParamStartAndWait("showheap", perfR, perfW) THEN perfOn := TRUE; EVAL RTMisc.RegisterExitor(PerfStop); PerfGrow(p0, p1 - p0); i := p0; WHILE i # Nil AND i < p1 DO j := i + 1; WHILE j < p1 AND desc[j - p0].continued DO INC(j); END; IF desc[i - p0].space # Space.Free THEN PerfChange(i, j - i); END; i := j; END; END; END PerfStart; PROCEDURE PerfFlip () = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Flip}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfFlip; PROCEDURE PerfPromotedRoots () = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Roots}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfPromotedRoots; PROCEDURE PerfStop (<*UNUSED*> n: INTEGER := 0) = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Bye}; BEGIN (* UNSAFE, but needed to prevent deadlock if we're crashing! *) EVAL Uuio.write(perfW, ADR(e), EventSize); EVAL Unix.close(perfW); END PerfStop; PROCEDURE PerfAllow (<*UNUSED*> n: INTEGER := 0) = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Off, nb := gcOff}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfAllow; PROCEDURE PerfBegin () = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Begin}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1 END PerfBegin; PROCEDURE PerfEnd () = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.End}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfEnd; PROCEDURE PerfChange (first: Page; nb: CARDINAL) = VAR e := RTHeapEvent.T{kind := RTHeapEvent.Kind.Change, first := first, nb := nb, desc := desc[first - p0]}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfChange; PROCEDURE PerfGrow (firstNew: Page; nb: CARDINAL) = VAR e := RTHeapEvent.T{ kind := RTHeapEvent.Kind.Grow, first := firstNew, nb := nb}; BEGIN perfOn := Uuio.write(perfW, ADR(e), EventSize) # -1; END PerfGrow; (* ----------------------------------------------------------------------- *) VAR mutex := NEW(MUTEX); (* RTWeakRef *) (* weakTable contains three singly-linked lists: for entries in use (rooted at index weakLive0), dead entries awaiting cleanup (at weakDead0), and free entries (at weakFree0). Entries in use contain the weak ref, the REF, and the procedure. The "a" field of the weak ref is the index in the table; this speeds lookup. 
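(WeakRefToRef below exploits this: it goes straight to
| weakTable [LOOPHOLE (t, WeakRefAB).a]
instead of searching a list.)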
The "b" field is a unique value, taken from a 32-bit counter. Dead entries contain the same fields, but the "a" field of the weak ref is set to -1 to keep lookups from succeeding. When the cleanup procedure is to be called, the original weak ref can still be reconstructed, since the "a" field was the index. *) VAR weakTable := NEW(UNTRACED REF ARRAY OF WeakEntry, 0); weakLive0 := -1; (* the root of the in-use list *) weakDead0 := -1; (* the root of the dead list *) weakFree0 := -1; (* the root of the free list *) TYPE WeakRefAB = RECORD a: BITS 32 FOR INTEGER; b: BITS 32 FOR Word.T; END; WeakEntry = RECORD t : WeakRefAB; r : RefReferent; p : WeakRefCleanUpProc; next: INTEGER; END; (* This is WeakRef.FromRef, which returns a new weak ref for an object. *) PROCEDURE WeakRefFromRef (r: REFANY; p: WeakRefCleanUpProc := NIL): WeakRef = BEGIN <* ASSERT r # NIL *> LOCK mutex DO INC(RT0u.inCritical); (* create a WeakCleaner thread the first time through *) IF weakCleaner = NIL THEN weakCleaner := Thread.Fork(NEW(Thread.Closure, apply := WeakCleaner)); END; (* if necessary, expand weakTable *) IF weakFree0 = -1 THEN VAR newTable := NEW(UNTRACED REF ARRAY OF WeakEntry, 2 * NUMBER(weakTable^) + 1); BEGIN SUBARRAY(newTable^, 0, NUMBER(weakTable^)) := weakTable^; FOR i := NUMBER(weakTable^) TO NUMBER(newTable^) - 1 DO WITH entry = newTable[i] DO entry.t.b := 0; entry.next := weakFree0; weakFree0 := i; END; END; weakTable := newTable; END; END; IF p # NIL THEN (* mark the object as having a weak ref with non-nil cleanup *) VAR header := HeaderOf(LOOPHOLE(r, ADDRESS)); BEGIN <* ASSERT NOT header^.weak *> header^.weak := TRUE; END; END; (* allocate a new entry *) VAR i := weakFree0; BEGIN weakFree0 := weakTable[i].next; (* generate a new weak ref *) VAR t := WeakRefAB{a := i, b := Word.Plus(weakTable[i].t.b, 1)}; BEGIN <* ASSERT t.b # 0 *> (* set up the entry *) weakTable[i] := WeakEntry{t := t, r := LOOPHOLE(r, RefReferent), p := p, next := weakLive0}; weakLive0 := i; DEC(RT0u.inCritical); RETURN LOOPHOLE(t, WeakRef); END; END; END; END WeakRefFromRef; (* This is WeakRef.ToRef, which inverts FromRef. *) PROCEDURE WeakRefToRef (t: WeakRef): REFANY = BEGIN LOCK mutex DO INC(RT0u.inCritical); (* if the weak ref is not dead, we know the index *) WITH entry = weakTable[LOOPHOLE(t, WeakRefAB).a] DO VAR r: REFANY := NIL; BEGIN (* check the weak ref there *) IF entry.t = LOOPHOLE(t, WeakRefAB) THEN INC(RT0u.inCritical); CollectorOn(); Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); CollectorOff(); DEC(RT0u.inCritical); r := LOOPHOLE(entry.r, REFANY); END; DEC(RT0u.inCritical); RETURN r; END; END; END; END WeakRefToRef; VAR weakCleaner : Thread.T; weakCondition := NEW(Thread.Condition); (* WeakCleaner waits for entries to be placed on the dead list, then cleans them up and puts them on the free list. *) PROCEDURE WeakCleaner (<*UNUSED*> closure: Thread.Closure): REFANY = VAR i : INTEGER; copy: WeakEntry; BEGIN LOOP (* get an entry to handle; copy its contents, then put it on the free list. *) LOCK mutex DO WHILE weakDead0 = -1 DO Thread.Wait(mutex, weakCondition); END; INC(RT0u.inCritical); i := weakDead0; WITH entry = weakTable[i] DO <* ASSERT entry.t.a = -1 *> INC(RT0u.inCritical); CollectorOn(); Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); CollectorOff(); DEC(RT0u.inCritical); copy := entry; weakDead0 := entry.next; entry.next := weakFree0; weakFree0 := i; END; DEC(RT0u.inCritical); END; (* call the registered procedure.
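As an illustration only (obj and Cleanup stand for a client's traced reference and cleanup procedure; neither is defined in this module): after a client has executed
| wr := WeakRefFromRef (obj, Cleanup);
and the collector later finds obj unreachable and moves its entry to the dead list, this thread eventually calls
| Cleanup (wr, obj);
with the original weak ref reconstructed from the table index and the saved "b" field.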
Note that collections are allowed; the copy is kept on the stack so the object won't be freed during the call. *) IF copy.p # NIL THEN copy.p(LOOPHOLE(WeakRefAB{a := i, b := copy.t.b}, WeakRef), LOOPHOLE(copy.r, REFANY)); END; copy.r := NIL; (* to help the conservative collector *) END; END WeakCleaner; (* ------------------------------------------------------------------- *) PROCEDURE FirstPage (p: Page): Page = BEGIN WHILE desc[p - p0].continued DO DEC(p); END; RETURN p; END FirstPage; PROCEDURE PageCount (p: Page): CARDINAL = VAR n := 0; BEGIN <* ASSERT NOT desc[p - p0].continued *> REPEAT INC(p); INC(n); UNTIL p >= p1 OR NOT desc[p - p0].continued; RETURN n; END PageCount; (* ------------------------------------------------------------------- *) PROCEDURE Protect (p: Page; readable, writable: BOOLEAN) = VAR n := PageCount(p); BEGIN <* ASSERT collectorOn OR (readable AND writable) *> <* ASSERT gcOff = 0 *> <* ASSERT RTHeapDep.VM *> <* NOWARN *><* ASSERT TRUE *> RTHeapDep.Protect(p, n, readable, writable); VAR protected := NOT (readable AND writable); BEGIN FOR i := 0 TO n - 1 DO desc[p + i - p0].protected := protected; END; END; IF perfOn THEN PerfChange(p, n); END; END Protect; PROCEDURE Unprotect (p: Page) = BEGIN Protect(p, readable := TRUE, writable := TRUE); END Unprotect; PROCEDURE Fault (addr: ADDRESS): BOOLEAN = VAR p := LOOPHOLE(addr, INTEGER) DIV BytesPerPage; BEGIN INC(RT0u.inCritical); <* ASSERT RTHeapDep.VM *> <* NOWARN *><* ASSERT TRUE *> cycleCost := cycleCost + RTHeapDep.VMFaultTime(); (* otherwise unaccounted for *) IF NOT (p0 <= p AND p < p1) OR desc[p - p0].space = Space.Unallocated THEN DEC(RT0u.inCritical); RETURN FALSE; (* not in heap *) END; IF NOT desc[p - p0].protected THEN DEC(RT0u.inCritical); RETURN TRUE; (* was protected, but not any more *) END; <* ASSERT NOT desc[p - p0].pure *> IF desc[p - p0].gray THEN CollectorOn(); IF p = impureCopyPage THEN IF CopySome() THEN <* ASSERT NOT desc[p - p0].gray *> ELSE IF desc[p - p0].gray THEN <* ASSERT p = impureCopyPage AND impureCopyStack = Nil *> InsertFiller(impureCopyPtr, impureCopyBoundary - impureCopyPtr); impureCopyPage := Nil; impureCopyStack := Nil; impureCopyPtr := NIL; impureCopyBoundary := NIL; FOR i := 0 TO PageCount(p) - 1 DO desc[p + i - p0].gray := FALSE; END; IF desc[p - p0].generation = Generation.Older THEN <* ASSERT desc[p - p0].space = Space.Current *> Protect(p, readable := TRUE, writable := FALSE); END; IF perfOn THEN PerfChange(p, 1); END; END; END; ELSE p := FirstPage(p); Unprotect(p); CleanBetween(PageToHeader(p), PageToHeader(p + 1)); FOR i := 0 TO PageCount(p) - 1 DO desc[p + i - p0].gray := FALSE; END; IF desc[p - p0].generation = Generation.Older THEN <* ASSERT desc[p - p0].space = Space.Current *> Protect(p, readable := TRUE, writable := FALSE); END; IF perfOn THEN PerfChange(p, PageCount(p)); END; END; CollectorOff(); ELSE p := FirstPage(p); <* ASSERT desc[p - p0].generation = Generation.Older *> Unprotect(p); END; DEC(RT0u.inCritical); RETURN TRUE; (* was protected, protection cleared *) END Fault; (* ------------------------------------------------------------------- *) (* The inner-loop collector action is to pick a gray page and completely clean it (i.e., make its referents at least gray, so that the page becomes black). The current gray page, "impureCopyPage", is distinguished; it's the page that newly gray objects are copied to. To improve locality of reference in the new space, we keep the set of gray pages as a stack. This helps approximate a depth-first copy to the new space.
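Concretely (this is only a restatement of the declarations just below, not additional state): the stack is threaded through the per-page "link" field, so walking it looks like
| p := impureCopyStack;
| WHILE p # Nil DO ... p := desc [p - p0].link; END;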
The current page is not a member of the stack, but will become one when it becomes full. The current page is always the page that contains newPtr. To reduce page faults, we separate the "pure" copy pages (those whose objects contain no REFs) from the "impure" ones (those with REFs). Only impure pages become gray, since pure pages can have no REFs into the old space (since they have no REFs at all). *) VAR impureCopyPage: Page := Nil; (* the current impure copy page *) impureCopyStack: Page := Nil; (* threaded through the "link" field; ends at Nil *) (* By analogy, we also maintain "pureCopyPage" and "pureCopyStack". These are not used, but maintaining them simplifies the code. *) VAR pureCopyPage: Page; (* the current pure copy page *) pureCopyStack: Page; (* threaded through the "link" field; ends at Nil *) (* By analogy, we also maintain "newPage" and "newStack". As with pureCopyPage and pureCopyStack, these are not used, but maintaining them simplifies the code. *) VAR newPage: Page; (* the current new page *) newStack: Page; (* threaded through the "link" field; ends at Nil *) (* Minimum number of bytes by which to grow the heap. Setting it higher reduces the number of system calls; setting it lower keeps the heap a little smaller. *) (* ------------------------------------------------------------------- *) VAR startedBackground := FALSE; PROCEDURE StartBackgroundCollection () = VAR start := FALSE; BEGIN LOCK mutex DO IF NOT startedBackground THEN start := TRUE; startedBackground := TRUE; END; END; IF start THEN EVAL Thread.Fork(NEW(Thread.Closure, apply := BackgroundThread)); END; END StartBackgroundCollection; (****** Page-level allocator ******) (* The freelist is sorted by blocksize, linked through the first page in each block, using the "link" field in the "desc" array. Page allocation is best-fit. Blocks of the same size are sorted by page number, to make the showheap display easier to read, and to slightly reduce fragmentation. *) (* FindFreePages allocates a run of "n" free pages, which we would prefer not to be near pages in the current space with notes in notAfter. The allocator can thus be used to separate pages with different notes, since they will have different lifetimes. This is a concern only when incremental and generational collection are combined. *) PROCEDURE FindFreePages (n: INTEGER; notAfter: SET OF Note): Page = VAR p: Page; BEGIN IF collectorState = CollectorState.Zero THEN p := AllocateFreePagesFromBlock(n, SET OF Note{}, TRUE); IF p # Nil THEN RETURN p; END; ELSE p := AllocateFreePagesFromBlock(n, notAfter, TRUE); IF p # Nil THEN RETURN p; END; p := AllocateFreePagesFromBlock(n, SET OF Note{}, FALSE); IF p # Nil THEN RETURN p; END; END; GrowHeap(n); p := AllocateFreePagesFromBlock(n, SET OF Note{}, TRUE); <* ASSERT p # Nil *> RETURN p; END FindFreePages; VAR free: Page; (* the head of the freelist *) (* AllocateFreePagesFromBlock finds the first block large enough to satisfy the request. "notAfter" is the set of page notes in the current space that the block allocated from must not immediately follow; this is used to separate Note.Allocated pages from Note.Copied pages. If "front" is TRUE, the pages will be allocated from the beginning of the block, else from the end; this is also used to separate Note.Allocated pages from Note.Copied pages. If the block is bigger than the request, the remainder is left at the right point in the freelist. If no block exists, Nil is returned.
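For illustration only (the sizes are made up): if the freelist holds blocks of 2, 5, and 9 pages, in that order, then a request for 3 pages with an empty "notAfter" set and front = TRUE takes the 5-page block (the first one long enough), returns its first 3 pages, and links the trailing 2-page fragment back into the freelist at the position that keeps the list sorted by length and, among equal lengths, by page number.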
*) PROCEDURE AllocateFreePagesFromBlock (n : INTEGER; notAfter: SET OF Note; front : BOOLEAN ): Page = VAR p := free; prevP := Nil; prevLength := 0; length : INTEGER; BEGIN LOOP IF p = Nil THEN RETURN Nil; END; length := FreeLength(p); IF length >= n AND NOT (p > p0 AND desc[(p - 1) - p0].space = Space.Current AND desc[(p - 1) - p0].note IN notAfter) THEN EXIT; END; prevP := p; prevLength := length; p := desc[p - p0].link; END; IF length = n THEN IF prevP = Nil THEN free := desc[p - p0].link; ELSE desc[prevP - p0].link := desc[p - p0].link; END; RETURN p; ELSE VAR newP, fragP: Page; fragLength : CARDINAL; BEGIN IF front THEN newP := p; fragP := p + n; ELSE newP := p + length - n; fragP := p; END; fragLength := length - n; IF fragLength > prevLength THEN IF prevP = Nil THEN free := fragP; ELSE desc[prevP - p0].link := fragP; END; desc[fragP - p0].link := desc[p - p0].link; ELSE IF prevP = Nil THEN free := desc[p - p0].link; ELSE desc[prevP - p0].link := desc[p - p0].link; END; VAR pp := free; prevPP := Nil; BEGIN LOOP IF pp = Nil THEN EXIT; END; VAR length := FreeLength(pp); BEGIN IF length > fragLength OR (length = fragLength AND pp > fragP) THEN EXIT; END; END; prevPP := pp; pp := desc[pp - p0].link; END; desc[fragP - p0].link := pp; IF prevPP = Nil THEN free := fragP; ELSE desc[prevPP - p0].link := fragP; END; END; END; RETURN newP; END; END; END AllocateFreePagesFromBlock; (* RebuildFreelist rebuilds the free list from the "desc" array. It first links all free blocks into the free list, then it sorts the free list. The sort used is insertion sort, which is quadratic in the number of different block sizes, but only linear in the number of pages. *) PROCEDURE RebuildFreelist () = BEGIN VAR prevP := Nil; prevSpace := Space.Unallocated; BEGIN (* link together the first pages of all free blocks *) FOR p := p0 TO p1 - 1 DO VAR space := desc[p - p0].space; BEGIN IF space = Space.Free AND prevSpace # Space.Free THEN IF prevP = Nil THEN free := p; ELSE desc[prevP - p0].link := p; END; prevP := p; END; prevSpace := space; END; END; IF prevP = Nil THEN free := Nil; ELSE desc[prevP - p0].link := Nil; END; END; (* sort them, using insertion sort *) VAR n := 1; (* smallest block size *) p := free; (* start of sublist we're examining *) prevP := Nil; (* element before sublist *) BEGIN LOOP VAR excess := Nil; prevExcess := Nil; BEGIN (* separate off blocks over "n" long into excess list *) WHILE p # Nil DO VAR length := FreeLength(p); BEGIN <* ASSERT length >= n *> IF length > n THEN IF prevExcess = Nil THEN excess := p; ELSE desc[prevExcess - p0].link := p; END; IF prevP = Nil THEN free := desc[p - p0].link; ELSE desc[prevP - p0].link := desc[p - p0].link; END; prevExcess := p; ELSE prevP := p; END; END; p := desc[p - p0].link; END; (* maybe done *) IF excess = Nil THEN EXIT; END; <* ASSERT prevExcess # Nil *> (* link longer blocks onto end *) IF prevP = Nil THEN free := excess; ELSE desc[prevP - p0].link := excess; END; desc[prevExcess - p0].link := Nil; p := excess; END; (* find the smallest size among the remaining blocks *) n := LAST(CARDINAL); VAR pp := p; BEGIN REPEAT VAR length := FreeLength(pp); BEGIN IF length < n THEN n := length; END; END; pp := desc[pp - p0].link; UNTIL pp = Nil; END; END; END; END RebuildFreelist; (* FreeLength returns the number of free pages starting at page p.
*) PROCEDURE FreeLength (p: Page): CARDINAL = BEGIN <* ASSERT desc[p - p0].space = Space.Free *> VAR pp := p + 1; BEGIN LOOP IF pp >= p1 THEN EXIT; END; IF desc[pp - p0].space # Space.Free THEN EXIT; END; INC(pp); END; RETURN pp - p; END; END FreeLength; (* GrowHeap adds a block of at least "minNewPages" free pages to the heap, and links it into the free list. *) VAR fragment0, fragment1: ADDRESS := NIL; CONST InitialBytes = 262144; (* initial heap size is 256K *) MinNewBytes = 262144; (* grow the heap by at least 256K *) MinNewFactor = 0.2; (* grow the heap by at least 20% *) PROCEDURE GrowHeap (pp: INTEGER) = VAR newChunk : ADDRESS; newSideSpan : INTEGER; firstNewPage: Page; lastNewPage : Page; newP0 : Page; newP1 : Page; BEGIN IF allocatedPages = 0 THEN pp := MAX(pp, (InitialBytes + BytesPerPage - 1) DIV BytesPerPage); ELSE pp := MAX(pp, (MinNewBytes + BytesPerPage - 1) DIV BytesPerPage); pp := MAX(pp, CEILING(FLOAT(allocatedPages) * MinNewFactor)); END; VAR bytes := (pp + 1) * BytesPerPage; BEGIN newChunk := LOOPHOLE(Unix.sbrk(bytes), ADDRESS); IF newChunk = NIL OR newChunk = LOOPHOLE(-1, ADDRESS) THEN FatalError(NIL, 0, "gc: Could not extend the traced heap"); END; IF fragment1 = newChunk THEN newChunk := fragment0; bytes := bytes + (fragment1 - fragment0); END; VAR excess := (-LOOPHOLE(newChunk, INTEGER)) MOD BytesPerPage; BEGIN INC(newChunk, excess); DEC(bytes, excess); END; VAR pages := bytes DIV BytesPerPage; BEGIN firstNewPage := LOOPHOLE(newChunk, INTEGER) DIV BytesPerPage; lastNewPage := firstNewPage + pages - 1; fragment0 := LOOPHOLE((firstNewPage + pages) * BytesPerPage, ADDRESS); fragment1 := newChunk + bytes; END; END; (* determine the new boundaries of the allocated pages *) IF p0 = Nil THEN newP0 := firstNewPage; newP1 := lastNewPage + 1; ELSIF firstNewPage < p0 THEN newP0 := firstNewPage; newP1 := p1; ELSIF p1 <= lastNewPage THEN newP0 := p0; newP1 := lastNewPage + 1; ELSE newP0 := p0; newP1 := p1; END; (* extend the side arrays if necessary *) newSideSpan := newP1 - newP0; IF desc = NIL OR newSideSpan # NUMBER(desc^) THEN WITH newDesc = NEW(UNTRACED REF ARRAY OF Desc, newSideSpan) DO IF desc # NIL THEN FOR i := FIRST(desc^) TO LAST(desc^) DO newDesc[i + p0 - newP0] := desc[i]; END; FOR i := p1 TO firstNewPage - 1 DO newDesc[i - newP0].space := Space.Unallocated; END; FOR i := lastNewPage + 1 TO p0 - 1 DO newDesc[i - newP0].space := Space.Unallocated; END; DISPOSE(desc); END; desc := newDesc; END; END; p0 := newP0; p1 := newP1; FOR i := firstNewPage TO lastNewPage DO desc[i - p0].space := Space.Free; END; IF perfOn THEN PerfGrow(firstNewPage, lastNewPage - firstNewPage + 1); END; INC(allocatedPages, lastNewPage - firstNewPage + 1); RebuildFreelist(); END GrowHeap; BEGIN IF RTParams.IsPresent("nogc") THEN gcOff := 1; END; IF RTParams.IsPresent("novm") THEN vmOff := 1; END; IF RTParams.IsPresent("noincremental") THEN incremental := FALSE; END; IF RTParams.IsPresent("nogenerational") THEN generational := FALSE; END; IF RTParams.IsPresent("paranoidgc") THEN InstallSanityCheck(); END; PerfStart(); END RTHeap.