(* Copyright (C) 1990, Digital Equipment Corporation *)
(* All rights reserved. *)
(* See the file COPYRIGHT for a full description. *)

(* Last modified on Wed Sep 23 12:21:33 PDT 1992 by jdd    *)
(*      modified on Sat Jun 27 15:49:16 PDT 1992 by muller *)
(*      modified on Thu Feb 27 16:04:52 PST 1992 by kalsow *)

UNSAFE MODULE RTHeap EXPORTS RTHeap, RTHeapRep, RTHeapPolicy, RTWeakRef;

IMPORT RT0, RT0u, RTMisc, RTMain, ThreadF, Cstdlib, Ctypes;
IMPORT RTStack, Thread, Word, RTParams, Unix;
IMPORT RTHeapEvent, LowPerfTool, Uuio;
FROM RT0 IMPORT Typecode;
FROM RTMisc IMPORT FatalError, FatalErrorI;
FROM SmallIO IMPORT stderr, PutText, PutInt, PutHexa, Flush;

(* This module implements the memory management part of the run-time system
   for Modula-3.  Both TRACED and UNTRACED referents are handled here.  The
   allocator/garbage collector for TRACED referents is an adaptation of the
   algorithm presented in WRL Research Report 88/2, ``Compacting Garbage
   Collection with Ambiguous Roots'', by Joel F. Bartlett; see that report
   for a detailed presentation.  The allocator/garbage collector for
   UNTRACED referents is simply malloc/free. *)

(*----------------------------------- low-level allocation and collection ---*)

(* We assume that references (values of the types ADDRESS and REFANY) are
   the addresses of addressable locations, and that locations with
   successive addresses are contiguous (that is, if a points to an
   n-location referent, then these n locations are at addresses
   a, a+1, ..., a+n-1).

   The memory is viewed as a collection of pages.  Each page has a number
   that identifies it, based on the addresses that are part of this page:
   page p contains the addresses p * BytesPerPage to (p+1) * BytesPerPage - 1.

   The page size must be a multiple of the header size (see below).  Given
   our conventions about page boundaries, this implies that the first
   location of a page is properly aligned for a Header. *)

(* The array desc and the global variables firstAllocatedPage and
   lastAllocatedPage describe the pages that are part of the traced heap.
   Either firstAllocatedPage and lastAllocatedPage are equal to InvalidPage
   and no pages are allocated; or both are valid pages and page p is
   allocated iff

       firstAllocatedPage <= p <= lastAllocatedPage
       AND desc [p - firstAllocatedPage] # Unallocated

   NUMBER (desc) must be equal to lastAllocatedPage - firstAllocatedPage + 1
   if there are allocated pages.  Index i in desc corresponds to page
   i + firstAllocatedPage; that is, desc [0] describes firstAllocatedPage,
   the first page of the traced heap. *)

(* We keep the number of allocated pages in a global variable; it should
   satisfy the invariant:

       allocatedPages = sigma (i = firstAllocatedPage, lastAllocatedPage,
                               space [i - firstAllocatedPage] # Unallocated)
                        if there are allocated pages, = 0 otherwise.

   We also keep the number of active pages in a global; it satisfies:

       activePages = sigma (i = firstAllocatedPage, lastAllocatedPage,
                            space [i - firstAllocatedPage] = nextSpace)
                     if there are allocated pages, = 0 otherwise. *)

(* Each referent is immediately preceded by a header that describes the type
   of the referent.  In the user world, this header is not visible; that is,
   a REFANY is the address of the referent, not the address of the header.

   Each referent is immediately followed by padding space so that the
   combined size (referent size + padding) is a multiple of the header size.
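   For example, if BYTESIZE (Header) were 4 and a referent occupied 10
   bytes, 2 bytes of padding would follow it so that the next header again
   starts on a header-size boundary.  (These particular numbers are only
   illustrative; the real sizes come from the compiler.)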
   Actually, the low-level routines are given a data size, which is the sum
   of the referent size and the padding size, and they assume this data size
   is a multiple of the header size.  With this padding, addresses of
   headers and referents will always be multiples of ADRSIZE (Header).

   The combination of header/referent/padding space is called a "heap
   object".  The size of a heap object is the size of the header, plus the
   size of the referent, plus the size of the padding.  The alignment of a
   heap object is the greater of the alignment of the header and the
   alignment of the referent.

   We make the following assumptions:

   - the alignment of headers is such that the addressable location
     following any properly aligned header is properly aligned for the type
     ADDRESS; and, for every referent:

         referent adrSize + padding adrSize >= ADRSIZE (ADDRESS)

     [During the garbage collection, we move heap objects.  But we need to
     keep the forwarding information somewhere.  This condition ensures that
     we can store the new address of the referent in the first word of the
     old referent.]

   - the pages are aligned more strictly than the headers (this means that
     the page size is a multiple of the header alignment).  [We can put a
     header at the beginning of a page] *)

TYPE
  RefReferent = ADDRESS;
  RefHeader   = UNTRACED REF Header;

PROCEDURE HeaderOf (r: RefReferent): RefHeader =
  BEGIN
    RETURN LOOPHOLE (r - ADRSIZE (Header), RefHeader);
  END HeaderOf;

PROCEDURE TypecodeOf (r: RefReferent): Typecode =
  BEGIN
    RETURN LOOPHOLE (r - ADRSIZE (Header), RefHeader).typecode;
  END TypecodeOf;

(* If a page is allocated, its type can be Type.Header or Type.Continued.
   In the first case, there is a heap object just at the beginning of the
   page, and others following.  The second case occurs when a heap object
   was too large to fit on a page: it starts at the beginning of a
   Type.Header page and overflows onto contiguous Type.Continued pages.
   Whatever space is left on the last Type.Continued page is never used.
   In other words, all the headers are on Type.Header pages.

   Actually, heap objects do not need to be contiguous.  Indeed, alignment
   constraints would make it difficult to ensure that property.
   <*description of fillers*> *)

(* We need to be able to determine the size of a referent during collection;
   here is a function to do just that.  It must be called with a non-nil
   pointer to the Header of a heap object that is there (has not been
   moved). *)

PROCEDURE ReferentSize (h: RefHeader): CARDINAL =
  VAR
    res: INTEGER;
    tc := h.typecode;
  BEGIN
    IF tc = Fill_1_type THEN RETURN 0; END;
    IF tc = Fill_N_type THEN
      res := LOOPHOLE (h + ADRSIZE (Header), UNTRACED REF INTEGER)^;
      RETURN res - BYTESIZE (Header);
    END;
    WITH def = RT0u.types [tc]^ DO
      IF (def.nDimensions = 0) THEN
        (* the typecell datasize tells the truth *)
        RETURN def.dataSize;
      END;
      (* ELSE, the referent is an open array; it has the following layout:
             pointer to the elements (ADDRESS)
             size 1
             ....
             size n
             optional padding
             elements
             ....
         where n is the number of open dimensions (given by the definition)
         and each size is the number of elements along the dimension *)
      VAR
        sizes := LOOPHOLE (h + ADRSIZE (Header)
                             + ADRSIZE (UNTRACED REF INTEGER), (* elt pointer*)
                           UNTRACED REF INTEGER);
      BEGIN
        res := 1;
        FOR i := 0 TO def.nDimensions - 1 DO
          res := res * sizes^;
          INC (sizes, ADRSIZE (INTEGER));
        END;
        res := res * def.elementSize;
      END;
      res := RTMisc.Upper (res + def.dataSize, BYTESIZE (Header));
    END; (*WITH def*)
    RETURN res;
  END ReferentSize;

(* Given an allocated page, we often need to find the next allocated one.
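   The search wraps around: when no allocated page follows p, NextPage
   restarts at firstAllocatedPage.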
*) PROCEDURE NextPage (p: Page): Page = BEGIN REPEAT INC (p); UNTIL p > lastAllocatedPage OR desc [p - firstAllocatedPage].space # Space.Unallocated; IF p > lastAllocatedPage THEN RETURN firstAllocatedPage; ELSE RETURN p; END; END NextPage; (* The convention about page numbering allows for a simple conversion from an address to the number of the page in which it is, as well as from a page number to the first address is contains: *) PROCEDURE ReferentToPage (r: RefReferent): Page = VAR p: INTEGER; BEGIN p := LOOPHOLE (r, INTEGER) DIV BytesPerPage; IF p < firstAllocatedPage OR p > lastAllocatedPage OR desc [p - firstAllocatedPage].space = Space.Unallocated THEN RETURN (InvalidPage); ELSE RETURN (p); END; END ReferentToPage; PROCEDURE HeaderToPage (r: RefHeader): Page = VAR p: INTEGER; BEGIN p := LOOPHOLE (r, INTEGER) DIV BytesPerPage; IF p < firstAllocatedPage OR p > lastAllocatedPage OR desc [p - firstAllocatedPage].space = Space.Unallocated THEN RETURN (InvalidPage); ELSE RETURN (p); END; END HeaderToPage; PROCEDURE PageToHeader (p: Page): RefHeader = BEGIN RETURN LOOPHOLE (p * BytesPerPage, RefHeader); END PageToHeader; PROCEDURE PageToAddress (p: Page): ADDRESS = BEGIN RETURN LOOPHOLE (p * BytesPerPage, ADDRESS); END PageToAddress; (* We remember where we should look for free space with the following globals: *) VAR freePage : Page; (* where to look when we need free contiguous pages *) freep, boundary: RefHeader; (* memory is contiguous and free in [freep, boundary[ *) PROCEDURE Queue (p: Page) = BEGIN IF queueHead # InvalidPage THEN desc [queueTail - firstAllocatedPage].link := p; ELSE queueHead := p; END; desc [p - firstAllocatedPage].link := InvalidPage; queueTail := p; END Queue; (* To move a heap object to the new space, modifying the original reference to it *) PROCEDURE Move (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR refref := LOOPHOLE(cp, UNTRACED REF RefReferent); ref := refref^; BEGIN IF ref = NIL THEN RETURN; END; VAR p := ReferentToPage(ref); BEGIN IF p = InvalidPage THEN RETURN; END; VAR p0 := p - firstAllocatedPage; oldHeader := HeaderOf(ref); BEGIN (* clear marks *) IF oldHeader.marka THEN oldHeader.marka := FALSE; END; <* ASSERT NOT oldHeader.markb *> IF desc[p0].space # Space.Current THEN (* nothing to do *) ELSIF desc[p0].promoted = Promotion.ContainsFrozenRef THEN (* if the page contains frozen refs, just promote it *) PromotePage(p, Space.New, Promotion.ContainsAccessibleFrozenRef); ELSIF promoteBigObjects AND p # lastAllocatedPage AND desc[p0 + 1].type = Type.Continued THEN (* if this is a large object, just promote the pages *) PromotePage(p, Space.New, Promotion.Large); ELSIF oldHeader.forwarded THEN (* if already moved, just update the reference *) refref^ := LOOPHOLE(ref, UNTRACED REF RefReferent)^; ELSE (* move the object *) WITH def = RT0u.types[oldHeader.typecode] DO VAR dataSize := ReferentSize(oldHeader); np := Gcalloc(dataSize, def.dataAlignment); BEGIN VAR newHeader := HeaderOf(np); BEGIN RTMisc.Copy( oldHeader, newHeader, ADRSIZE(Header) + dataSize); END; IF def.nDimensions # 0 THEN (* open array: update the internal pointer *) LOOPHOLE(np, UNTRACED REF ADDRESS)^ := np + def.dataSize; END; oldHeader.forwarded := TRUE; LOOPHOLE(ref, UNTRACED REF RefReferent)^ := np; refref^ := np; END; END; END; END; END; END Move; (* Determines whether a REF has yet been moved into the new space. 
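A NIL ref, a ref whose page is outside the current space, and a ref whose header already has the forwarded bit set all count as moved.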
Follows the logic in "Move".*) PROCEDURE Moved (ref: RefReferent): BOOLEAN = BEGIN IF ref = NIL THEN RETURN TRUE; END; (* check the space *) VAR p := ReferentToPage(ref); BEGIN IF p = InvalidPage OR desc[p - firstAllocatedPage].space # Space.Current THEN RETURN TRUE; END; END; (* check the forwarded bit *) IF HeaderOf(LOOPHOLE(ref, ADDRESS)).forwarded THEN RETURN TRUE; END; (* not moved *) RETURN FALSE; END Moved; (* When an allocated page is referenced by the stack, we have to move it to the next space and insert it in the list of promoted pages. In the case where the page is actually part of a group of pages for a big referent, we have to promote all these pages to the new space, but only the first one needs to be inserted in the queue, as it is the only one containing referent headers. This routine is passed to the Threads implementation. It is called for each stack, where start and stop are the addresses of the first and last word of the stack under consideration. *) PROCEDURE collectStackLocations (start, stop: ADDRESS) = VAR fp := start; firstAllocatedAddress := PageToAddress (firstAllocatedPage); firstNonAllocatedAddress := PageToAddress (lastAllocatedPage + 1); p: ADDRESS; pp: Page; BEGIN WHILE fp <= stop DO p := LOOPHOLE (fp, UNTRACED REF ADDRESS)^; IF firstAllocatedAddress <= p AND p < firstNonAllocatedAddress THEN pp := LOOPHOLE (p, INTEGER) DIV BytesPerPage; PromotePage (pp, Space.New, Promotion.AmbiguousRoot); END; INC (fp, RTStack.PointerAlignment); END; END collectStackLocations; PROCEDURE PromotePage (p: Page; where: Space; why: Promotion) = VAR q: Page := p; large := FALSE; BEGIN IF desc [p-firstAllocatedPage].space = Space.Current THEN REPEAT INC (p); UNTIL p > lastAllocatedPage OR desc [p-firstAllocatedPage].type = Type.Header OR desc [p-firstAllocatedPage].space # Space.Current; large := p # q + 1; REPEAT DEC (p); desc [p-firstAllocatedPage].space := where; desc [p-firstAllocatedPage].promoted := why; IF perfOn THEN PerfChanged (p, 1); END; IF where = Space.New THEN IF large THEN INC (largeActivePages); ELSE INC (smallActivePages); END; END; UNTIL desc [p-firstAllocatedPage].type # Type.Continued; IF where = Space.New THEN Queue (p); END; END; END PromotePage; PROCEDURE InsertFiller (start: RefHeader; n: INTEGER) = BEGIN IF n = 0 THEN (* nothing to do *) ELSIF n = ADRSIZE (Header) THEN start^ := FillHeader1; ELSIF n >= ADRSIZE (Header) + ADRSIZE (INTEGER) THEN start^ := FillHeaderN; LOOPHOLE (start + ADRSIZE (Header), UNTRACED REF INTEGER)^ := n; ELSE <* ASSERT FALSE *> END; END InsertFiller; (* Collect is called to actually garbage collect the traced heap *) VAR collectionGoingOn := FALSE; CONST TracedRefTypes = RT0.RefTypeSet { RT0.RefType.Traced }; PROCEDURE Collect (nbPages: INTEGER) = BEGIN VAR m := monitorsHead; BEGIN WHILE m # NIL DO m.before(); m := m.next; END; END; IF perfOn THEN PerfStartCollection(); END; (* fill the rest of the current page *) InsertFiller(freep, boundary - freep); freep := boundary; collectionGoingOn := TRUE; INC(collections); (* The 'new' nextSpace is empty *) smallActivePages := 0; largeActivePages := 0; (* No pages have been promoted so far *) queueHead := InvalidPage; queueTail := InvalidPage; (* Examine the stacks for possible pointers *) ThreadF.ProcessStacks(collectStackLocations); (* Mark the pages pointed by the ice box *) FOR i := FIRST(iceBuckets^) TO LAST(iceBuckets^) DO VAR entry := iceBuckets[i]; BEGIN WHILE entry # NIL DO PromotePage(ReferentToPage(LOOPHOLE(entry.k, RefReferent)), Space.Current, Promotion.ContainsFrozenRef); 
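(* Note: promoting to Space.Current here only tags the page as holding frozen
   refs; Move promotes such a page to the new space (as
   Promotion.ContainsAccessibleFrozenRef) only if the collector still finds a
   reference to it, and the sweep after the collection discards the icebox
   entries whose pages were left behind. *)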
entry := entry.next; END; END; END; (* Examine the global variables for possible pointers *) RTMain.GlobalMapProc(Move, NIL, TracedRefTypes); IF perfOn THEN PerfPromotedRoots(); END; Copy(); (* All refs reachable from roots have now been copied to the new space. There may be some weakly-referenced objects left behind. *) HandleWeakRefs(); FOR i := 0 TO lastAllocatedPage - firstAllocatedPage DO desc[i].space := switch[desc[i].space]; END; IF perfOn THEN PerfFlip(); END; VAR availPages := allocatedPages DIV 2; freePages := availPages - smallActivePages - largeActivePages; neededPages := MAX(TRUNC(recoveryRatio * FLOAT(availPages) + 0.5), MAX(minRecovery, nbPages)); BEGIN IF freePages < neededPages THEN EVAL GrowHeap(nbPages) END; END; collectionGoingOn := FALSE; (* keep only the reachable frozen refs *) FOR i := FIRST(iceBuckets^) TO LAST(iceBuckets^) DO VAR prev : IceCube := NIL; entry := iceBuckets[i]; BEGIN WHILE entry # NIL DO IF desc[ReferentToPage(LOOPHOLE(entry.k, RefReferent)) - firstAllocatedPage].space # Space.Current THEN DeleteFrozenRef(i, prev); IF prev # NIL THEN entry := prev.next; ELSE entry := iceBuckets[i]; END; ELSE prev := entry; entry := entry.next; END; END; END; END; IF perfOn THEN PerfStopCollection(); END; VAR m := monitorsTail; BEGIN WHILE m # NIL DO m.after(); m := m.prev; END; END; END Collect; PROCEDURE Copy () = VAR tc : Typecode; proc: RT0.TypeMapProc; BEGIN (* Sweep across promoted pages and move their constituent items. It may be that we examining heap objects on the same page as the page we are allocating; this means that the page we are looking at may not be full with heap objects. *) WHILE queueHead # InvalidPage DO VAR h := PageToHeader(queueHead); he := PageToHeader(queueHead + 1); BEGIN WHILE h < he AND (freep = boundary OR HeaderToPage(freep) # queueHead OR h < freep) DO <* ASSERT LOOPHOLE (h, INTEGER) MOD 4 = 0 *> tc := h.typecode; IF (tc # Fill_1_type) AND (tc # Fill_N_type) THEN <* ASSERT tc # 0 *> (* this test is enough; we'll catch the other bad values by the array index below *) proc := RT0u.types[tc].mapProc; IF proc # NIL THEN proc(Move, NIL, h + ADRSIZE(Header), TracedRefTypes); END; END; INC(h, ADRSIZE(Header) + ReferentSize(h)); END; END; queueHead := desc[queueHead - firstAllocatedPage].link; END; END Copy; (* We maintain a list in weakTable, starting at weakLive0, of weak refs and the objects they reference. This table is not considered a root. When HandleWeakRefs is entered, any object mentioned in that list is a candidate for cleanup. First, we determine which weakly-referenced objects with non-NIL cleanups ("WRNNC objects") are reachable from other WRNNC objects, by walking the old space. All such WRNNC objects are copied to new space, and all the objects they reference. All the weakly-referenced objects left in the old space can then be scheduled for cleanup; we move them from the list starting at weakLive0 to the list starting at weakDead0 in weakTable. A separate thread runs WeakCleaner, which does the calls to the procedures. Note that the refs in weakTable must be updated to point to new space. 
*) PROCEDURE HandleWeakRefs () = BEGIN (* get ready to allocate on a new page *) InsertFiller(freep, boundary - freep); freep := boundary; queueHead := InvalidPage; queueTail := InvalidPage; (* allocate a stack on the side for walking the old space *) InitStack(); (* iterate over the weak refs to walk the old space *) VAR i := weakLive0; BEGIN WHILE i # -1 DO (* here, all old-space WRNNC objects that have already been scanned have marka set, as do all old-space objects reachable from them; all old-space WRNNC objects that were reachable from other already-scanned WRNNC objects have been promoted to the new space. *) WITH entry = weakTable[i] DO IF entry.p # NIL AND NOT Moved(entry.r) THEN (* we haven't seen this WRNNC object before *) VAR header := HeaderOf(LOOPHOLE(entry.r, ADDRESS)); BEGIN IF NOT header.marka THEN <* ASSERT NOT header.markb *> (* visit all old-space objects reachable from it; promote all other old-space WRNNC objects reachable from it; promote all old-space objects reachable from it that have "marka" set. mark all visited nodes with "markb". *) WeakWalk1(entry.r); <* ASSERT NOT header.marka *> <* ASSERT header.markb *> (* then change all "markb" to "marka" *) WeakWalk2(entry.r); <* ASSERT header.marka *> <* ASSERT NOT header.markb *> END; END; END; i := entry.next; END; END; END; (* recursively copy all objects reachable from promoted objects. marks "marka" and "markb" are cleared when objects move to the new space. *) Copy(); (* move to a new page *) InsertFiller(freep, boundary - freep); freep := boundary; queueHead := InvalidPage; queueTail := InvalidPage; (* iterate over all weak refs. if the object hasn't been promoted, schedule a cleanup *) VAR i := weakLive0; previous := -1; BEGIN WHILE i # -1 DO WITH entry = weakTable[i] DO IF Moved(entry.r) THEN (* no cleanup this time; note new address *) Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); previous := i; i := entry.next; ELSE (* the weak ref is dead; there are no cleanups *) VAR header := HeaderOf(LOOPHOLE(entry.r, ADDRESS)); BEGIN header.weak := FALSE; END; (* move the entry from the weakLive0 list into the weakDead0 or weakFree0 list *) VAR next := entry.next; BEGIN IF previous = -1 THEN weakLive0 := next; ELSE weakTable[previous].next := next; END; entry.t.a := -1; (* keep ToRef from succeeding *) IF entry.p # NIL THEN entry.next := weakDead0; weakDead0 := i; ELSE entry.next := weakFree0; weakFree0 := i; END; i := next; END; END; END; END; END; (* for all entries on the weakDead0 list, including those just placed there, note the new address *) VAR i := weakDead0; BEGIN WHILE i # -1 DO WITH entry = weakTable[i] DO <* ASSERT entry.t.a = -1 *> Move(NIL, ADR(entry.r), NIL, RT0.RefType.Traced); i := entry.next; END; END; END; Copy(); Thread.Signal(weakCondition); END HandleWeakRefs; (* WeakWalk1 starts at a WRNNC object and visits all objects in old space reachable from it, using "markb" to keep from visiting them more than once. All other WRNNC objects visited are promoted, as are all objects already visited from other WRNNC objects. 
*) PROCEDURE WeakWalk1 (ref: RefReferent) = VAR ref0 := ref; BEGIN <* ASSERT StackEmpty() *> LOOP IF NOT Moved(ref) THEN VAR header := HeaderOf(ref); BEGIN IF header.marka THEN <* ASSERT NOT header.markb *> Move(NIL, ADR(ref), NIL, RT0.RefType.Traced); ELSIF NOT header.markb THEN IF header.weak AND ref # ref0 THEN Move(NIL, ADR(ref), NIL, RT0.RefType.Traced); ELSE header.markb := TRUE; WeakWalkToPush(header); END; END; END; END; IF StackEmpty() THEN EXIT; END; ref := PopStack(); END; END WeakWalk1; (* WeakWalk2 starts at a WRNNC objects and visits all objects in the old space that are reachable from it, changing "markb" to "marka" *) PROCEDURE WeakWalk2 (ref: RefReferent) = BEGIN <* ASSERT StackEmpty() *> LOOP IF NOT Moved(ref) THEN VAR header := HeaderOf(ref); BEGIN IF header.markb THEN header.markb := FALSE; header.marka := TRUE; WeakWalkToPush(header); END; END; END; IF StackEmpty() THEN EXIT; END; ref := PopStack(); END; END WeakWalk2; (* WeakWalkToPush walks all REFs in a WRNNC object, pushing them on the stack to be visited later. *) PROCEDURE WeakWalkToPush (header: RefHeader) = VAR tc := header.typecode; BEGIN IF tc # Fill_1_type AND tc # Fill_N_type THEN <* ASSERT tc # 0 *> VAR proc := RT0u.types[tc].mapProc; BEGIN IF proc # NIL THEN proc(PushStackProcedure, NIL, header + ADRSIZE(Header), TracedRefTypes); END; END; END; END WeakWalkToPush; (* The stack for walking the old space is maintained on the heap in the new space. *) VAR stack : REF ARRAY OF RefReferent; stack0, stack1, stackA: UNTRACED REF RefReferent; stackN : CARDINAL; (* InitStack allocates an initial stack of 100 elements. *) PROCEDURE InitStack () = BEGIN stackN := 100; stack := NEW(REF ARRAY OF RefReferent, stackN); stack0 := ADR(stack^[0]); stack1 := stack0 + stackN * ADRSIZE(RefReferent); stackA := stack0; END InitStack; (* PushStack pushes an object onto the stack, growing it if necessary. *) PROCEDURE PushStack (ref: RefReferent) = BEGIN IF stackA = stack1 THEN VAR newStackN := stackN * 2; newStack: REF ARRAY OF RefReferent := NEW(REF ARRAY OF RefReferent, newStackN); BEGIN SUBARRAY(newStack^, 0, stackN) := SUBARRAY(stack^, 0, stackN); stack0 := ADR(newStack^[0]); stackA := stack0 + stackN * ADRSIZE(RefReferent); stack1 := stack0 + newStackN * ADRSIZE(RefReferent); stack := newStack; stackN := newStackN; END; END; stackA^ := ref; INC(stackA, ADRSIZE(RefReferent)); END PushStack; (* PopStack pops an object off the stack. *) PROCEDURE PopStack (): RefReferent = BEGIN DEC(stackA, ADRSIZE(RefReferent)); RETURN stackA^; END PopStack; (* StackEmpty tells if the stack is empty. *) PROCEDURE StackEmpty (): BOOLEAN = BEGIN RETURN stackA = stack0; END StackEmpty; (* PushStackProcedure is called from WeakWalkToPush to push a ref found inside another object. *) PROCEDURE PushStackProcedure (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR ref := LOOPHOLE(cp, UNTRACED REF RefReferent)^; BEGIN IF ref # NIL THEN PushStack(ref); END; END PushStackProcedure; CONST switch = ARRAY Space OF Space { Space.Unallocated, Space.Free, Space.Current, Space.Free }; (* When we need more heap, we get it from the operating system. This function returns the number of the first new page. 
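The increment actually requested from the system is the largest of minIncrement, twice the caller's request, and growthRate times the number of pages already allocated, so the heap grows geometrically rather than one small chunk at a time.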
*) PROCEDURE GrowHeap (minInc: INTEGER): Page = VAR actualInc := MAX (ROUND (FLOAT (allocatedPages) * growthRate), MAX (minIncrement, 2 * minInc)); newChunk : ADDRESS; newSideSpan : INTEGER; firstNewPage : Page; lastNewPage : Page; newFirstAllocatedPage : Page; newLastAllocatedPage : Page; BEGIN (* our caller is responsible to set the side arrays for the the minInc pages we return him; but we have to set them for the extra pages we have allocated (free) and the pages that come in [newFirstAllocatedPage..newLastAllocatedPage] (unallocated) *) newChunk := LOOPHOLE (Unix.sbrk ((actualInc + 1) * BytesPerPage), ADDRESS); IF newChunk = NIL OR newChunk = LOOPHOLE (-1, ADDRESS) THEN FatalError (NIL, 0, "gc: Could not extend the traced heap"); END; firstNewPage := (LOOPHOLE (newChunk, INTEGER) + BytesPerPage - 1) DIV BytesPerPage; lastNewPage := firstNewPage + actualInc - 1; (* determine the new boundaries of the allocated pages *) IF firstAllocatedPage = InvalidPage THEN newFirstAllocatedPage := firstNewPage; newLastAllocatedPage := lastNewPage; ELSIF firstNewPage < firstAllocatedPage THEN newFirstAllocatedPage := firstNewPage; newLastAllocatedPage := lastAllocatedPage; ELSIF lastAllocatedPage < lastNewPage THEN newFirstAllocatedPage := firstAllocatedPage; newLastAllocatedPage := lastNewPage; ELSE newFirstAllocatedPage := firstAllocatedPage; newLastAllocatedPage := lastAllocatedPage; END; (* extend the side arrays if necessary *) newSideSpan := newLastAllocatedPage - newFirstAllocatedPage + 1; IF desc = NIL OR newSideSpan # NUMBER (desc^) THEN WITH newDesc = NEW (UNTRACED REF ARRAY OF Desc, newSideSpan) DO IF desc # NIL THEN FOR i := FIRST (desc^) TO LAST (desc^) DO newDesc [i+firstAllocatedPage-newFirstAllocatedPage] := desc [i]; END; FOR i := lastAllocatedPage + 1 TO firstNewPage - 1 DO newDesc [i - newFirstAllocatedPage].space := Space.Unallocated; END; FOR i := lastNewPage + 1 TO firstAllocatedPage - 1 DO newDesc [i - newFirstAllocatedPage].space := Space.Unallocated; END; DISPOSE (desc); END; desc := newDesc; END; END; firstAllocatedPage := newFirstAllocatedPage; lastAllocatedPage := newLastAllocatedPage; FOR i := firstNewPage TO lastNewPage DO desc [i-firstAllocatedPage].space := Space.Free; END; IF perfOn THEN PerfGrow (firstNewPage, actualInc); END; INC (allocatedPages, actualInc); RETURN firstNewPage; END GrowHeap; PROCEDURE FindPages (nbPages: INTEGER): Page = VAR free, toExamine: INTEGER; page: Page; BEGIN (* `freePage'is the first free page we should look at. The name of the game is to find a sequence of `nbPages' contiguous free pages in the heap, starting there. We keep the following invariant: `page' designates the page we are examining and the 'free' pages before it are are free, contiguous and allocated. *) toExamine := allocatedPages; page := freePage; free := 0; WHILE free < nbPages AND toExamine > 0 DO IF desc [page - firstAllocatedPage].space = Space.Free THEN IF page = freePage + free THEN (* page is contiguous *) INC (free); ELSE freePage := page; free := 1; END; ELSE freePage := NextPage (page); free := 0; END; page := NextPage (page); DEC (toExamine); END; IF free # nbPages THEN (* No such sequence exist. 
We allocate a new chunk from the system and we know that the first pages there have the appropriate properties *) freePage := GrowHeap (nbPages); END; RETURN freePage; END FindPages; PROCEDURE AllocatePages (nbPages: INTEGER): ADDRESS = BEGIN WITH firstPage = FindPages (nbPages) DO FOR i := firstPage TO firstPage + nbPages - 1 DO desc [i - firstAllocatedPage].space := Space.Unallocated; END; IF perfOn THEN PerfChanged (firstPage, nbPages); END; DEC (allocatedPages, nbPages); RETURN PageToAddress (firstPage); END; END AllocatePages; PROCEDURE MakeCollectible (from: ADDRESS; nbPages: INTEGER) = VAR firstNewPage, lastNewPage : Page; BEGIN firstNewPage := (LOOPHOLE (from, INTEGER) + BytesPerPage - 1) DIV BytesPerPage; lastNewPage := firstNewPage + nbPages - 1; (* update the side arrays *) FOR i := firstNewPage TO lastNewPage DO desc [i - firstAllocatedPage] := Desc {Space.Current, Type.Continued, Promotion.Allocated, InvalidPage}; END; IF perfOn THEN PerfChanged (firstNewPage, nbPages); END; INC (allocatedPages, nbPages); END MakeCollectible; (* Gcalloc handles only the fast path to allocation *) PROCEDURE Gcalloc (dataSize, dataAlignment: CARDINAL): RefReferent = VAR alignment: INTEGER; newFreep: RefHeader; res, referentTry: RefReferent; BEGIN INC (RT0u.inCritical); (* Where would this heap object end if we were to allocate at freep ? *) referentTry := freep + ADRSIZE (Header); (*---------------- see CheckTypes --------------------------------- WITH a = RTMisc.Align (referentTry, dataAlignment) DO alignment := a - referentTry; newFreep := LOOPHOLE (a + dataSize, RefHeader); END; ------------------------------------------------------------------*) VAR tmp := Word.And (LOOPHOLE (referentTry, INTEGER), MaxAlignMask); BEGIN alignment := align [tmp, dataAlignment]; newFreep := referentTry + (alignment + dataSize); END; (* If this is not ok, take the long route *) IF boundary < newFreep THEN res := LongGcalloc (dataSize, dataAlignment); DEC (RT0u.inCritical); RETURN res; END; (* Align the referent *) IF (alignment # 0) THEN InsertFiller (freep, alignment); freep := freep + alignment; END; res := LOOPHOLE (freep + ADRSIZE (Header), RefReferent); freep := newFreep; DEC (RT0u.inCritical); RETURN (res); END Gcalloc; PROCEDURE CollectNow () = BEGIN INC (RT0u.inCritical); IF collectionProhibited <= 0 AND (smallActivePages # 0 OR largeActivePages # 0) THEN Collect (0); END; DEC (RT0u.inCritical); END CollectNow; PROCEDURE LongGcalloc (dataSize, dataAlignment: CARDINAL): RefReferent = VAR nbBytes := RTMisc.Upper (ADRSIZE (Header), dataAlignment) + dataSize; nbPages := (nbBytes + AdrPerPage - 1) DIV AdrPerPage; newFreep, newBoundary: RefHeader; res: RefReferent; allocIn : Space; BEGIN (* we cannot just take this many pages if activating them would lead to more than half of the allocated pages to be active *) IF nbPages = 1 AND smallActivePages + 1 > (allocatedPages - largeActivePages) DIV 2 AND promoteBigObjects OR nbPages > 1 AND smallActivePages > (allocatedPages - largeActivePages - nbPages) DIV 2 AND promoteBigObjects OR NOT promoteBigObjects AND smallActivePages + largeActivePages + nbPages > allocatedPages DIV 2 THEN IF NOT collectionGoingOn AND collectionProhibited <= 0 AND (smallActivePages # 0 OR largeActivePages # 0) THEN Collect (nbPages); LOCK mutex DO INC (collectionProhibited); END; res := Gcalloc (dataSize, dataAlignment); LOCK mutex DO DEC (collectionProhibited); END; RETURN (res); ELSE (* we cannot collect but we want more pages *) freePage := GrowHeap (nbPages); END; ELSE (* there are at 
least as many pages as we need that are not active. But we have to find these pages; also, they may notbe contiguous *) freePage := FindPages (nbPages); END; (* so freePage points to a block of at least nbPages contiguous, free pages; just what we need ! *) (* may be we have to put a filler to align this thing *) newFreep := PageToHeader (freePage); res := RTMisc.Align (newFreep + ADRSIZE (Header), dataAlignment); InsertFiller (newFreep, res - ADRSIZE (Header) - newFreep); (* allocate the object *) newFreep := LOOPHOLE (res + dataSize, RefHeader); newBoundary := PageToHeader (freePage + nbPages); (* discard the smallest leftover of the current allocation page and the new allocation page. If we are garbage collecting, and we keep the old page, we may allocate things in it after we scan the page we have just allocated, so we have to rescan it *) IF collectionGoingOn THEN allocIn := Space.New; (* the space left on the page where we were allocating is lost *) InsertFiller (freep, boundary - freep); IF nbPages = 1 THEN (* we can still put things on the page we have just taken *) freep := newFreep; boundary := newBoundary; ELSE (* this is a large object, don't put anything with it *) boundary := freep; END; (* We are compacting right now; at some point, we will have to chase the pointers in referent we are just moving, so we make sure that this page is in the list *) Queue (freePage); ELSE (* not in a collection *) allocIn := Space.Current; IF nbPages = 1 AND newBoundary - newFreep > boundary - freep THEN (* keep the larger of the two free areas *) VAR tmp: RefHeader; BEGIN tmp := newFreep; newFreep := freep; freep := tmp; tmp := newBoundary; newBoundary := boundary; boundary := tmp; END; END; END; (* make sure that the remainder of the current free page is parsable *) InsertFiller (newFreep, newBoundary - newFreep); desc [freePage - firstAllocatedPage] := Desc {allocIn, Type.Header, Promotion.Allocated, InvalidPage}; IF nbPages = 1 THEN INC (smallActivePages); ELSE INC (largeActivePages, nbPages); FOR i := 1 TO nbPages - 1 DO desc [freePage + i - firstAllocatedPage] := Desc {allocIn, Type.Continued, Promotion.Allocated, InvalidPage}; END; END; IF perfOn THEN PerfChanged (freePage, nbPages); END; (* a good guess to start to look for free, contiguous pages *) freePage := NextPage (freePage + nbPages - 1); RETURN (res); END LongGcalloc; (*------------------------------------------------------------ allocation ---*) VAR init_cache : ARRAY [0..2047] OF ADDRESS; (* initialized copies of freshly allocated objects *) PROCEDURE FastAllocate (tc: Typecode): REFANY = BEGIN RETURN Allocate (tc); END FastAllocate; CONST HdrSize = ADRSIZE (Header); PROCEDURE Allocate (tc: Typecode): REFANY = VAR def: RT0.TypeDefinition; res: RefReferent; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) def := RT0u.types [tc]; IF NOT def.traced OR def.nDimensions # 0 THEN FatalErrorI ("RTHeap.Allocate: improper typecode: ", tc); END; INC (RT0u.inCritical); res := Gcalloc (def.dataSize, def.dataAlignment); IF (init_cache[tc] # NIL) THEN RTMisc.Copy (init_cache[tc], res - HdrSize, def.dataSize + HdrSize); ELSE LOOPHOLE (res - HdrSize, RefHeader)^ := Header {typecode := tc, forwarded := FALSE}; RTMisc.Zero (res, def.dataSize); IF def.defaultMethods # NIL THEN LOOPHOLE (res, UNTRACED REF ADDRESS)^ := def.defaultMethods; END; VAR d := def; BEGIN WHILE d # NIL DO IF d.initProc # NIL THEN d.initProc (res) END; d := d.parent; END; END; IF (def.dataSize <= BYTESIZE (RT0.Typecell)) 
THEN init_cache [tc] := Malloc (def.dataSize + HdrSize); RTMisc.Copy (res - HdrSize, init_cache [tc], HdrSize + def.dataSize); END; END; DEC (RT0u.inCritical); RETURN LOOPHOLE (res, REFANY); END Allocate; PROCEDURE AllocateUntracedRef (tc: Typecode): ADDRESS = VAR res: ADDRESS; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) WITH def = RT0u.types [tc]^ DO IF def.traced OR def.defaultMethods # NIL THEN FatalErrorI ("RTHeap.AllocateUntracedRef: improper typecode: ", tc); END; res := Malloc (def.dataSize); RTMisc.Zero (res, def.dataSize); IF def.initProc # NIL THEN def.initProc (res) END; END; (*WITH def*) RETURN (res); END AllocateUntracedRef; PROCEDURE AllocateUntraced (tc: Typecode): UNTRACED ROOT = VAR d: RT0.TypeDefinition; res: ADDRESS; hdrSize: INTEGER; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) d := RT0u.types [tc]; WITH def = d^ DO IF def.traced OR def.defaultMethods = NIL THEN FatalErrorI ("RTHeap.AllocateUntraced: improper typecode:", tc); END; hdrSize := MAX (BYTESIZE (Header), def.dataAlignment); res := Malloc (hdrSize + def.dataSize) + hdrSize; RTMisc.Zero (res, def.dataSize); (* given res, we are not able to free the result of malloc *) LOOPHOLE (res - ADRSIZE (Header), RefHeader)^ := Header {typecode := tc, forwarded := FALSE}; IF def.defaultMethods # NIL THEN LOOPHOLE (res, UNTRACED REF ADDRESS)^ := def.defaultMethods; END; WHILE d # NIL DO IF d.initProc # NIL THEN d.initProc (res) END; d := d.parent; END; END; (*WITH def*) RETURN (res); END AllocateUntraced; PROCEDURE CheckOpenArray (READONLY def: RT0.Typecell; READONLY s: ARRAY OF INTEGER): INTEGER = VAR nbElems := 1; BEGIN IF def.nDimensions = 0 THEN FatalErrorI ("RTHeap.CheckOpenArray: improper typecode: ", def.typecode); END; IF def.nDimensions # NUMBER (s) THEN FatalError (NIL, 0, "RTHeap.CheckOpenArray", ": dimensions in type and shape disagree"); END; FOR i := 0 TO def.nDimensions - 1 DO IF (s[i] < 0) THEN FatalErrorI ("RTHeap.CheckOpenArray: negative array size: ", s[i]); END; nbElems := s[i] * nbElems; END; RETURN nbElems; END CheckOpenArray; PROCEDURE FastAllocateOpenArray (tc: Typecode; READONLY s: ARRAY OF INTEGER): REFANY = BEGIN RETURN AllocateOpenArray (tc, s); END FastAllocateOpenArray; PROCEDURE AllocateOpenArray (tc: Typecode; READONLY s: ARRAY OF INTEGER): REFANY = VAR def: RT0.TypeDefinition; nbElems, nBytes: CARDINAL; res: RefReferent; BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) def := RT0u.types [tc]; nbElems := CheckOpenArray (def^, s); nBytes := def.dataSize + nbElems * def.elementSize; INC (RT0u.inCritical); res := Gcalloc (RTMisc.Upper (nBytes, BYTESIZE(Header)), def.dataAlignment); LOOPHOLE (res - ADRSIZE (Header), RefHeader)^ := Header {typecode := tc, forwarded := FALSE}; LOOPHOLE (res, UNTRACED REF ADDRESS)^ := res + def.dataSize; FOR i := 0 TO def.nDimensions - 1 DO LOOPHOLE (res + ADRSIZE (ADDRESS) + i * ADRSIZE (INTEGER), UNTRACED REF INTEGER)^ := s[i]; END; RTMisc.Zero (res + ADRSIZE (ADDRESS) + def.nDimensions * ADRSIZE (INTEGER), nbElems * def.elementSize); WHILE def # NIL DO IF def.initProc # NIL THEN def.initProc (res) END; def := def.parent; END; DEC (RT0u.inCritical); RETURN LOOPHOLE (res, REFANY); END AllocateOpenArray; PROCEDURE AllocateUntracedOpenArrayDef (def: RT0.TypeDefinition; READONLY s: ARRAY OF INTEGER): ADDRESS = VAR nbElems := CheckOpenArray (def^, s); VAR res := Malloc (def.dataSize + nbElems * 
def.elementSize); BEGIN LOOPHOLE (res, UNTRACED REF ADDRESS)^ := res + def.dataSize; FOR i := 0 TO def.nDimensions - 1 DO LOOPHOLE (res + ADRSIZE (ADDRESS) + i * ADRSIZE (INTEGER), UNTRACED REF INTEGER)^ := s[i]; END; RTMisc.Zero (res + ADRSIZE (ADDRESS) + def.nDimensions * ADRSIZE (INTEGER), nbElems * def.elementSize); WHILE def # NIL DO IF def.initProc # NIL THEN def.initProc (res) END; def := def.parent; END; RETURN res; END AllocateUntracedOpenArrayDef; PROCEDURE AllocateUntracedOpenArray (tc: Typecode; READONLY s: ARRAY OF INTEGER): ADDRESS = BEGIN (* if tc is not proper, the array indexing will fail; that is our runtime check in this case *) RETURN AllocateUntracedOpenArrayDef (RT0u.types [tc], s); END AllocateUntracedOpenArray; PROCEDURE Duplicate (r: REFANY): REFANY = VAR def := RT0u.types [TypecodeOf (LOOPHOLE (r, ADDRESS))]; VAR header := HeaderOf (LOOPHOLE (r, ADDRESS)); VAR dataSize := ReferentSize (header); VAR nBytes := RTMisc.Upper (dataSize, BYTESIZE (Header)); VAR res := Gcalloc (nBytes, def.dataAlignment); BEGIN RTMisc.Copy (header, HeaderOf (res), ADRSIZE (Header) + dataSize); (* if it is an open array, update its internal data pointer *) IF def.nDimensions # 0 THEN LOOPHOLE (res, UNTRACED REF ADDRESS)^ := res + (LOOPHOLE (r, UNTRACED REF ADDRESS)^ - LOOPHOLE (r, ADDRESS)); END; RETURN LOOPHOLE (res, REFANY); END Duplicate; PROCEDURE Malloc (size: INTEGER): ADDRESS = VAR res: ADDRESS; BEGIN INC (RT0u.inCritical); res := Cstdlib.malloc (size); IF res = NIL THEN FatalError (NIL, 0, "malloc failed, unable to get more memory"); END; DEC (RT0u.inCritical); RETURN res; END Malloc; (*------------------------------------------------------------- collector ---*) PROCEDURE DisableCollection () = BEGIN LOCK mutex DO INC (collectionProhibited); END; IF perfOn THEN PerfAllow (); END; END DisableCollection; PROCEDURE EnableCollection () = BEGIN LOCK mutex DO DEC (collectionProhibited); END; IF perfOn THEN PerfAllow (); END; END EnableCollection; (*------------------------------------------------------ heap data layout ---*) PROCEDURE GetDataAdr (r: REFANY): ADDRESS = VAR def: RT0.TypeDefinition; BEGIN def := RT0u.types [TypecodeOf (LOOPHOLE (r, ADDRESS))]; IF def.defaultMethods # NIL THEN RETURN (LOOPHOLE (r, ADDRESS) + ADRSIZE (ADDRESS)); ELSIF def.nDimensions # 0 THEN RETURN LOOPHOLE (r, UNTRACED REF ADDRESS)^; ELSE RETURN LOOPHOLE (r, ADDRESS); END; END GetDataAdr; PROCEDURE GetDataSize (r: REFANY): CARDINAL = VAR def: RT0.TypeDefinition; nbElems: INTEGER; BEGIN def := RT0u.types [TypecodeOf (LOOPHOLE (r, ADDRESS))]; IF def.defaultMethods # NIL THEN RETURN (def.dataSize - BYTESIZE (ADDRESS)); ELSIF def.nDimensions # 0 THEN nbElems := 1; VAR sizes := LOOPHOLE (LOOPHOLE (r, ADDRESS) + ADRSIZE (ADDRESS), UNTRACED REF INTEGER); BEGIN FOR i := 0 TO def.nDimensions - 1 DO nbElems := nbElems * sizes^; INC (sizes, ADRSIZE (INTEGER)); END; END; RETURN (nbElems * def.elementSize); ELSE RETURN def.dataSize; END; END GetDataSize; (*----------------------------------------------------------- open arrays ---*) PROCEDURE GetShape (r: REFANY; VAR nDimensions: INTEGER; VAR s: ArrayShape) = BEGIN WITH def = RT0u.types [TypecodeOf (LOOPHOLE (r, ADDRESS))] DO nDimensions := def.nDimensions; IF nDimensions # 0 THEN s := LOOPHOLE (LOOPHOLE (r, ADDRESS) + ADRSIZE (ADDRESS), ArrayShape); END; END; END GetShape; PROCEDURE GetNDimensions (tc: Typecode): CARDINAL = VAR def: RT0.TypeDefinition; BEGIN def := RT0u.types [tc]; RETURN (def.nDimensions); END GetNDimensions; 
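(* Illustration only (this sketch is not part of the original module): a
   client-side debugging routine could combine the procedures above to
   describe an arbitrary referent.  The output calls follow the SmallIO
   interface imported at the top of this module; treat the exact client
   spelling as an assumption.

     PROCEDURE DescribeRef (r: REFANY) =
       VAR tc := TYPECODE (r);
       BEGIN
         PutText (stderr, "typecode = ");      PutInt (stderr, tc);
         PutText (stderr, ", dimensions = ");  PutInt (stderr, GetNDimensions (tc));
         PutText (stderr, ", data bytes = ");  PutInt (stderr, GetDataSize (r));
         PutText (stderr, "\n");
       END DescribeRef;
*)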
(*-------------------------------------------------------------- monitors ---*) TYPE PublicMonitorClosure = OBJECT METHODS before (); after (); END; REVEAL MonitorClosure = PublicMonitorClosure BRANDED "RTHeap.MonitorClosure" OBJECT next, prev: MonitorClosure; OVERRIDES before := Noop; after := Noop; END; VAR monitorsHead, monitorsTail: MonitorClosure; PROCEDURE RegisterMonitor (cl: MonitorClosure) = BEGIN cl.next := monitorsHead; IF monitorsHead = NIL THEN monitorsTail := cl; ELSE monitorsHead.prev := cl; END; monitorsHead := cl; END RegisterMonitor; PROCEDURE UnregisterMonitor (cl: MonitorClosure) = BEGIN IF cl = monitorsHead THEN IF cl = monitorsTail THEN monitorsHead := NIL; monitorsTail := NIL; ELSE monitorsHead := monitorsHead.next; monitorsHead.prev := NIL; END; ELSE IF cl = monitorsTail THEN monitorsTail := monitorsTail.prev; monitorsTail.next := NIL; ELSE cl.prev.next := cl.next; cl.next.prev := cl.prev; END; END; END UnregisterMonitor; PROCEDURE Noop (<*UNUSED*> cl: MonitorClosure) = BEGIN END Noop; (*--------------------------------------------------------------- objects ---*) PROCEDURE GetSize (r: REFANY): INTEGER = BEGIN RETURN ReferentSize (HeaderOf (LOOPHOLE (r, ADDRESS))) + BYTESIZE (Header) END GetSize; PROCEDURE DisposeTraced (VAR r: REFANY) = BEGIN r := NIL; END DisposeTraced; PROCEDURE DisposeUntraced (VAR a: ADDRESS) = BEGIN IF a # NIL THEN Cstdlib.free (a); a := NIL; END; END DisposeUntraced; PROCEDURE DisposeUntracedObj (VAR a: ADDRESS) = VAR def : RT0.TypeDefinition; BEGIN IF a # NIL THEN def := RT0u.types [TypecodeOf (a)]; a := a - MAX (BYTESIZE (Header), def.dataAlignment); Cstdlib.free (a); a := NIL; END; END DisposeUntracedObj; (*--------------------------------------------------------------- OBJECTs ---*) PROCEDURE SetDefaultMethods (r: ROOT) = VAR tc := TypecodeOf (LOOPHOLE (r, RefReferent)); BEGIN LOOPHOLE (r, UNTRACED REF ADDRESS)^ := RT0u.types[tc].defaultMethods; END SetDefaultMethods; (*--------------------------------------------------------------- ice box ---*) (* The ice box contains addresses of referents that should not be moved by the garbage collector. The main use of that feature is to support hash tables indexed by refs. Of course, the garbage collector has to know about those referents. Since they don't move, we can store them in a hash table (I know, it is funny to implement ref table to support ref table, but...) To allow multiple independant freeze of a given referent, a count is associated to each of them. The icebox is a hash table of pairs
. Each bucket is list sorted in increasing order. To make things simpler, the table is not an array of pointers to the heads of the lists, but rather an array of list elements. That way, insertions and deletion at the front of the list don't have to be treated separately. *) CONST Multiplier = -1664117991; (* = LOOPHOLE( Round( .6125423371 * 2^32 ), INTEGER ) *) MaxChain = 10; TYPE IceCube = UNTRACED REF RECORD next: IceCube; k: INTEGER; temperature: INTEGER; END; Buckets = UNTRACED REF ARRAY OF IceCube; VAR iceBoxCover := NEW (Thread.Mutex); nonEmptyBuckets := 0; iceBuckets := NEW (Buckets, 64); iceBoxLogSize := 6; iceBoxEntries := 0; PROCEDURE LookupIceBox (k: INTEGER; VAR bucket: CARDINAL; VAR prev: IceCube): BOOLEAN RAISES {} = (* look for an entry with key 'k'. If found, set 'bucket' to the bucket containing it, 'prev' to the entry before it (may be NIL if it is the first in the bucket), and return TRUE. If not found, set 'bucket' to the bucket in which it would be, 'prev' to entry that would be just before it (may be NIL if it would be the first in the bucket), and return FALSE. *) VAR entry: IceCube; BEGIN bucket := Word.Shift (k * Multiplier, iceBoxLogSize - BITSIZE (Word.T)); prev := NIL; entry := iceBuckets [bucket]; WHILE entry # NIL AND k >= entry.k DO IF k = entry.k THEN RETURN TRUE; END; prev := entry; entry := prev.next; END; RETURN entry # NIL AND k = entry.k; END LookupIceBox; PROCEDURE FreezeRef (r: REFANY) RAISES {} = VAR prev, new: IceCube; bucket: CARDINAL; BEGIN <* ASSERT r # NIL *> LOCK iceBoxCover DO IF LookupIceBox (LOOPHOLE (r, INTEGER), bucket, prev) THEN IF prev # NIL THEN DEC (prev.next.temperature); ELSE DEC (iceBuckets [bucket].temperature); END; ELSE new := NEW (IceCube, k := LOOPHOLE (r, INTEGER), temperature := -1); IF prev # NIL THEN new.next := prev.next; prev.next := new; ELSE new.next := iceBuckets [bucket]; IF new.next = NIL THEN INC (nonEmptyBuckets); END; iceBuckets [bucket] := new; END; INC (iceBoxEntries); IF (iceBoxEntries + nonEmptyBuckets - 1) DIV nonEmptyBuckets > MaxChain THEN ExpandIceBox (); END; END; END; END FreezeRef; PROCEDURE ExpandIceBox () RAISES {} = VAR oldBuckets := iceBuckets; movedEntry, nextMovedEntry, prev: IceCube; bucket: CARDINAL; BEGIN iceBuckets := NEW (Buckets, 2 * NUMBER (oldBuckets^)); INC (iceBoxLogSize); nonEmptyBuckets := 0; FOR oldI := FIRST (oldBuckets^) TO LAST (oldBuckets^) DO movedEntry := oldBuckets [oldI].next; WHILE movedEntry # NIL DO nextMovedEntry := movedEntry.next; VAR b := LookupIceBox (movedEntry.k, bucket, prev); BEGIN <* ASSERT NOT b *> END; IF prev # NIL THEN movedEntry.next := prev.next; prev.next := movedEntry; ELSE IF iceBuckets [bucket] = NIL THEN INC (nonEmptyBuckets); END; movedEntry.next := iceBuckets [bucket]; iceBuckets [bucket] := movedEntry; END; movedEntry := nextMovedEntry; END; END; DISPOSE (oldBuckets); END ExpandIceBox; PROCEDURE UnfreezeRef (r: REFANY) RAISES {} = CONST Warm = 0; VAR prev: IceCube; bucket: CARDINAL; melted: BOOLEAN; BEGIN <* ASSERT r # NIL *> LOCK iceBoxCover DO VAR b := LookupIceBox (LOOPHOLE (r, INTEGER), bucket, prev); BEGIN <* ASSERT b *> END; IF prev # NIL THEN INC (prev.next.temperature); melted := prev.next.temperature = Warm; ELSE INC (iceBuckets [bucket].temperature); melted := iceBuckets [bucket].temperature = Warm; END; IF melted THEN DeleteFrozenRef (bucket, prev); END; END; END UnfreezeRef; PROCEDURE DeleteFrozenRef (bucket: CARDINAL; prev: IceCube) RAISES {} = VAR todelete: IceCube; BEGIN IF prev # NIL THEN todelete := prev.next; prev.next := 
todelete.next; ELSE todelete := iceBuckets [bucket]; iceBuckets [bucket] := todelete.next; END; IF iceBuckets [bucket] = NIL THEN DEC (nonEmptyBuckets); END; DEC (iceBoxEntries); DISPOSE (todelete); END DeleteFrozenRef; (*------------------------------------------------------------- debugging ---*) VAR fillersSeen := FALSE; fillersStart: RefHeader; PROCEDURE ShowFillers (start, stop: RefHeader) = BEGIN IF NOT fillersSeen THEN RETURN; END; DEC (stop, ADRSIZE (Header)); IF start = stop THEN PutHexa (stderr, LOOPHOLE (start, INTEGER)); PutText (stderr, " - filler\n"); ELSE PutHexa (stderr, LOOPHOLE (start, INTEGER)); PutText (stderr, " - "); PutHexa (stderr, LOOPHOLE (stop, INTEGER)); PutText (stderr, " : "); PutInt (stderr, (stop - start) DIV ADRSIZE (Header) + 1); PutText (stderr, " fillers\n"); END; fillersSeen := FALSE; END ShowFillers; PROCEDURE ShowPage (page: Page; print: BOOLEAN) = (* scans page; if print is TRUE, sends to stderr the starting addresses of the objects and their typecodes *) VAR h := PageToHeader (page); p := HeaderToPage (h); def: RT0.TypeDefinition; tc: Typecode; BEGIN IF p = InvalidPage THEN PutText (stderr, "Invalid page\n"); ELSIF desc [p-firstAllocatedPage].type = Type.Continued THEN PutText (stderr, "continuation page\n"); ELSIF desc [p-firstAllocatedPage].space # Space.Current AND desc [p-firstAllocatedPage].space # Space.New THEN PutText (stderr, "page not in the current or next space\n"); ELSE WHILE HeaderToPage (h) = p AND (HeaderToPage (freep) # p OR h < freep) DO tc := h.typecode; IF (tc = Fill_1_type) OR (tc = Fill_N_type) THEN IF NOT fillersSeen THEN fillersSeen := TRUE; fillersStart := h; END; ELSE IF print THEN ShowFillers (fillersStart, h); PutHexa (stderr, LOOPHOLE (h, INTEGER)); def := RT0u.types [tc]; PutText (stderr, " : tc = "); PutInt (stderr, def.typecode); PutText (stderr, "\n"); END; END; INC (h, ADRSIZE (Header) + ReferentSize (h)); END; ShowFillers (fillersStart, h); END; END ShowPage; PROCEDURE GCstatus (mode: INTEGER) = (* dump the heap: mode >= 0: calls ShowPage on page mode mode = -1; show global information mode = -2; show global information and call ShowPage on every page *) VAR p, pp: Page; BEGIN IF mode >= 0 THEN ShowPage (mode, TRUE); ELSE PutText (stderr, "queueHead = "); PutInt (stderr, queueHead); PutText (stderr, ", collections = "); PutInt (stderr, collections); PutText (stderr, "\n"); PutText (stderr, "allocated = "); PutInt (stderr, allocatedPages); PutText (stderr, ", active = "); PutInt (stderr, smallActivePages); PutText (stderr, "s + "); PutInt (stderr, largeActivePages); PutText (stderr, "l\n"); IF mode < -1 THEN p := firstAllocatedPage; WHILE p <= lastAllocatedPage DO IF desc [p-firstAllocatedPage].space = Space.Unallocated THEN pp := p + 1; WHILE pp <= lastAllocatedPage AND desc [pp-firstAllocatedPage].space = Space.Unallocated DO INC (pp); END; PutText (stderr, "-------------- pages "); PutInt (stderr, p); PutText (stderr, " to "); PutInt (stderr, pp-1); PutText (stderr, " ---------- UNALLOCATED\n"); p := pp; ELSE IF desc [p - firstAllocatedPage].type = Type.Header THEN PutText (stderr, "h page "); ELSE PutText (stderr, "c page "); END; PutInt (stderr, p); PutText (stderr, ", space = "); PutInt (stderr, ORD (desc [p - firstAllocatedPage].space)); PutText (stderr, ", link = "); PutInt (stderr, desc [p - firstAllocatedPage].link); PutText (stderr, "\n"); IF desc [p - firstAllocatedPage].type = Type.Header AND mode < -2 AND (desc [p-firstAllocatedPage].space = Space.Current OR desc[p-firstAllocatedPage].space = 
Space.New) THEN ShowPage (p, mode = -4); END; INC (p); END; END; END; END; Flush (stderr); END GCstatus; PROCEDURE RefSanityCheck (<*UNUSED*> arg : REFANY; cp : ADDRESS; <*UNUSED*> root: ADDRESS; <*UNUSED*> kind: RT0.RefType) = VAR o := LOOPHOLE (cp, REF RefReferent)^; p := ReferentToPage (o); h := HeaderOf (o); tc: INTEGER; BEGIN IF o # NIL THEN tc := h.typecode; IF firstAllocatedPage <= p AND p <= lastAllocatedPage THEN <* ASSERT desc [p-firstAllocatedPage].space = Space.Current *> <* ASSERT desc [p-firstAllocatedPage].type = Type.Header *> <* ASSERT (0 < tc AND tc < RT0u.nTypes) OR tc = Fill_1_type OR tc = Fill_N_type *> ELSE (* the compiler generates Text.T that are not in the traced heap *) <* ASSERT tc = 1 *> END; END; END RefSanityCheck; TYPE SanityClosure = MonitorClosure OBJECT OVERRIDES before := SanityCheck; after := SanityCheck; END; PROCEDURE SanityCheck (<*UNUSED*> self: SanityClosure) = (* check that the heap is in reasonable shape; fail by ASSERT failures *) VAR p := firstAllocatedPage; ep: Page; h, n : RefHeader; tc: Typecode; proc: RT0.TypeMapProc; BEGIN WHILE p <= lastAllocatedPage DO CASE desc [p-firstAllocatedPage].space OF | Space.Unallocated => (* ok *) INC (p); | Space.Current => <* ASSERT desc [p-firstAllocatedPage].type = Type.Header *> (* go over the heap objects *) h := PageToHeader (p); ep := p; WHILE HeaderToPage (h) = p AND (freep = boundary OR HeaderToPage (freep) # p OR h < freep) DO tc := h.typecode; <* ASSERT (0 < tc AND tc < RT0u.nTypes) OR tc = Fill_1_type OR tc = Fill_N_type *> n := h + ADRSIZE (Header) + ReferentSize (h); (* this may be a multi-page object; check the other pages *) ep := HeaderToPage (n - ADRSIZE (Header)); FOR i := p + 1 TO ep DO <* ASSERT desc [i-firstAllocatedPage].type = Type.Continued *> END; (* check the references in this object *) IF (tc # Fill_1_type) AND (tc # Fill_N_type) THEN proc := RT0u.types [tc].mapProc; IF proc # NIL THEN proc (RefSanityCheck, NIL, h + ADRSIZE (Header), TracedRefTypes); END; END; h := n; END; p := ep + 1; | Space.New => (* ok *) INC (p); | Space.Free => (* ok *) INC (p); END; END; END SanityCheck; PROCEDURE InstallSanityCheck () = VAR cl: SanityClosure; BEGIN cl := NEW (SanityClosure); RegisterMonitor (cl); END InstallSanityCheck; (*---------------------------------------------------------------------------*) PROCEDURE VisitAllRefs (v: RefVisitor) = VAR tc: Typecode; BEGIN TRY DisableCollection (); FOR p := firstAllocatedPage TO lastAllocatedPage DO IF desc [p - firstAllocatedPage].space = Space.Current AND desc [p - firstAllocatedPage].type = Type.Header THEN VAR h := PageToHeader (p); he := PageToHeader (p + 1); size : INTEGER; BEGIN WHILE h < he AND (freep = boundary OR HeaderToPage (freep) # p OR h < freep) DO size := ReferentSize (h); tc := h.typecode; IF (tc # Fill_1_type) AND (tc # Fill_N_type) THEN IF NOT v.visit (tc, LOOPHOLE (h + ADRSIZE (Header), REFANY), size) THEN RETURN; END; END; INC (h, ADRSIZE (Header) + size); END; END; END; END; FINALLY EnableCollection (); END; END VisitAllRefs; TYPE CountClosure = MonitorClosure OBJECT tcs : REF ARRAY OF Typecode; counts : REF ARRAY OF CARDINAL; visitor : RefVisitor; OVERRIDES after := CountRefsForTypecodes; END; TYPE CountAllClosure = MonitorClosure OBJECT counts : REF ARRAY OF CARDINAL; visitor : RefVisitor; OVERRIDES after := CountRefsForAllTypecodes; END; PROCEDURE ShowCounts (READONLY tcs: ARRAY OF Typecode) : MonitorClosure = VAR cl: CountClosure; BEGIN cl := NEW (CountClosure); cl.tcs := NEW (REF ARRAY OF Typecode, NUMBER (tcs)); cl.tcs^ := 
tcs; cl.counts := NEW (REF ARRAY OF CARDINAL, NUMBER (tcs)); cl.visitor := NEW (CountVisitor, cl := cl); RegisterMonitor (cl); RETURN cl; END ShowCounts; PROCEDURE ShowAllCounts () : MonitorClosure = VAR cl: CountAllClosure; BEGIN cl := NEW (CountAllClosure); cl.counts := NEW (REF ARRAY OF CARDINAL, RT0u.nTypes); cl.visitor := NEW (CountAllVisitor, cl := cl); RegisterMonitor (cl); RETURN cl; END ShowAllCounts; TYPE CountVisitor = RefVisitor OBJECT cl: CountClosure OVERRIDES visit := One; END; CountAllVisitor = RefVisitor OBJECT cl: CountAllClosure OVERRIDES visit := All; END; PROCEDURE One (self: CountVisitor; tc: Typecode; <*UNUSED*> r: REFANY; <*UNUSED*> size: CARDINAL): BOOLEAN = BEGIN FOR i := FIRST (self.cl.tcs^) TO LAST (self.cl.tcs^) DO IF self.cl.tcs[i] = tc THEN INC (self.cl.counts [i]); RETURN TRUE; END; END; RETURN TRUE; END One; PROCEDURE All (self: CountAllVisitor; tc: Typecode; <*UNUSED*> r: REFANY; <*UNUSED*> size: CARDINAL): BOOLEAN = BEGIN INC (self.cl.counts [tc]); RETURN TRUE; END All; PROCEDURE CountRefsForTypecodes (cl: CountClosure) = BEGIN FOR i := FIRST (cl.counts^) TO LAST (cl.counts^) DO cl.counts[i] := 0; END; VisitAllRefs (cl.visitor); FOR i := FIRST (cl.tcs^) TO LAST (cl.tcs^) DO PutText (stderr, "count["); PutInt (stderr, cl.tcs[i]); PutText (stderr, "] = "); PutInt (stderr, cl.counts[i]); IF i # LAST (cl.tcs^) THEN PutText (stderr, ", "); END; END; PutText (stderr, "\n"); END CountRefsForTypecodes; PROCEDURE CountRefsForAllTypecodes (cl: CountAllClosure) = BEGIN FOR i := FIRST (cl.counts^) TO LAST (cl.counts^) DO cl.counts[i] := 0; END; VisitAllRefs (cl.visitor); FOR i := FIRST (cl.counts^) TO LAST (cl.counts^) DO IF cl.counts[i] > 1 THEN PutInt (stderr, i); PutText (stderr, ": "); PutInt (stderr, cl.counts[i]); IF i # LAST (cl.counts^) THEN PutText (stderr, ", "); END; END; END; PutText (stderr, "\n"); END CountRefsForAllTypecodes; (*-------------------------------------------------------- initialization ---*) CONST MaxAlignment = 8; CONST MaxAlignMask = 2_0111; (* bit mask to capture MaxAlignment *) TYPE MaxAlignRange = [0..MaxAlignment-1]; VAR align : ARRAY MaxAlignRange, [1..MaxAlignment] OF INTEGER; (* align[i,j] == RTMisc.Align (i, j) - i *) PROCEDURE CheckTypes () = (* called by RTType.Init after type registration, but before any allocation *) VAR is_power : ARRAY [0..MaxAlignment] OF BOOLEAN; size: INTEGER; BEGIN (* ensure that init_cache is large enough *) <*ASSERT RT0u.nTypes <= NUMBER (init_cache) *> (* check that it's safe to eliminate the #A call to upper ... 
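that is, the dataSize recorded for every traced, non-open-array type must already be a multiple of BYTESIZE (Header), so Gcalloc can use it directly without rounding it up.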
*) FOR i := 0 TO RT0u.nTypes-1 DO WITH def = RT0u.types [i]^ DO IF (def.traced) AND (def.nDimensions = 0) THEN size := def.dataSize; <*ASSERT size = RTMisc.Upper (size, BYTESIZE (Header)) *> END; END; END; (* compute the small powers of two *) FOR i := FIRST (is_power) TO LAST (is_power) DO is_power [i] := FALSE END; is_power [1] := TRUE; is_power [2] := TRUE; is_power [4] := TRUE; is_power [8] := TRUE; <* ASSERT MaxAlignment <= 8 *> (* check that all data alignments are small powers of two so that "RTMisc.Align (addr, alignment)" can be safely replaced by "addr + align [Word.And (addr, 7), alignment]" in Gcalloc.*) FOR i := 0 TO RT0u.nTypes-1 DO WITH def = RT0u.types [i]^ DO IF (def.traced) THEN <*ASSERT is_power [def.dataAlignment] *> END; END; END; (* initialize the alignment array *) FOR i := FIRST (align) TO LAST (align) DO FOR j := FIRST (align[0]) TO LAST (align[0]) DO align [i, j] := RTMisc.Upper (i, j) - i; END; END; END CheckTypes; (*-------------------------------------------------------- showheap hooks ---*) VAR perfR, perfW: Ctypes.int; perfOn: BOOLEAN := FALSE; CONST EventSize = (BITSIZE(RTHeapEvent.T) + BITSIZE(CHAR) - 1) DIV BITSIZE(CHAR); PROCEDURE PerfStart () = VAR i, j: Page; BEGIN IF LowPerfTool.ParamStartAndWait ("showheap", perfR, perfW) THEN perfOn := TRUE; EVAL RTMisc.RegisterExitor (PerfStop); PerfGrow (firstAllocatedPage, lastAllocatedPage - firstAllocatedPage + 1); i := firstAllocatedPage; WHILE i # InvalidPage AND i <= lastAllocatedPage DO j := i + 1; WHILE j <= lastAllocatedPage AND desc [j - firstAllocatedPage].type = Type.Continued DO INC (j); END; IF desc [i - firstAllocatedPage].space # Space.Free THEN PerfChanged (i, j - i); END; i := j; END; END; END PerfStart; PROCEDURE PerfStop (<*UNUSED*>n: INTEGER := 0) = VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.Bye}; BEGIN (* UNSAFE, but needed to prevent deadlock if we're crashing! 
(*-------------------------------------------------------- showheap hooks ---*)

VAR
  perfR, perfW: Ctypes.int;
  perfOn: BOOLEAN := FALSE;

CONST
  EventSize = (BITSIZE (RTHeapEvent.T) + BITSIZE (CHAR) - 1) DIV BITSIZE (CHAR);

PROCEDURE PerfStart () =
  VAR i, j: Page;
  BEGIN
    IF LowPerfTool.ParamStartAndWait ("showheap", perfR, perfW) THEN
      perfOn := TRUE;
      EVAL RTMisc.RegisterExitor (PerfStop);
      PerfGrow (firstAllocatedPage, lastAllocatedPage - firstAllocatedPage + 1);
      i := firstAllocatedPage;
      WHILE i # InvalidPage AND i <= lastAllocatedPage DO
        j := i + 1;
        WHILE j <= lastAllocatedPage
              AND desc [j - firstAllocatedPage].type = Type.Continued DO
          INC (j);
        END;
        IF desc [i - firstAllocatedPage].space # Space.Free THEN
          PerfChanged (i, j - i);
        END;
        i := j;
      END;
    END;
  END PerfStart;

PROCEDURE PerfStop (<*UNUSED*> n: INTEGER := 0) =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.Bye};
  BEGIN
    (* UNSAFE, but needed to prevent deadlock if we're crashing! *)
    EVAL Uuio.write (perfW, ADR (e), EventSize);
    EVAL Unix.close (perfW);
  END PerfStop;

PROCEDURE PerfAllow (<*UNUSED*> n: INTEGER := 0) =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.CollectionProhibited,
                          nb   := collectionProhibited};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfAllow;

PROCEDURE PerfStartCollection () =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.CollectionStart};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfStartCollection;

PROCEDURE PerfPromotedRoots () =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.PromotedRoots};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfPromotedRoots;

PROCEDURE PerfFlip () =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.Flip};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfFlip;

PROCEDURE PerfStopCollection () =
  VAR e := RTHeapEvent.T {kind := RTHeapEvent.Kind.CollectionStop};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfStopCollection;

PROCEDURE PerfGrow (firstNew: Page; nb: CARDINAL) =
  VAR e := RTHeapEvent.T {kind  := RTHeapEvent.Kind.Grow,
                          first := firstNew,
                          nb    := nb};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfGrow;

PROCEDURE PerfChanged (first: Page; nb: CARDINAL) =
  VAR e := RTHeapEvent.T {kind  := RTHeapEvent.Kind.ChangePages,
                          first := first,
                          nb    := nb,
                          desc  := desc [first - firstAllocatedPage]};
  BEGIN
    perfOn := Uuio.write (perfW, ADR (e), EventSize) # -1;
  END PerfChanged;

(*---------------------------------------------------------------------------*)

VAR mutex := NEW (MUTEX);   (* RTWeakRef *)

(* weakTable contains three singly-linked lists: entries in use (rooted at
   index weakLive0), dead entries awaiting cleanup (at weakDead0), and free
   entries (at weakFree0).  Entries in use contain the weak ref, the REF,
   and the procedure.  The "a" field of the weak ref is the index in the
   table; this speeds lookup.  The "b" field is a unique value, taken from a
   32-bit counter.  Dead entries contain the same fields, but the "a" field
   of the weak ref is set to -1 to keep lookups from succeeding.  When the
   cleanup procedure is to be called, the original weak ref can still be
   reconstructed, since the "a" field was the index. *)

VAR
  weakTable := NEW (UNTRACED REF ARRAY OF WeakEntry, 0);
  weakLive0 := -1;   (* the root of the in-use list *)
  weakDead0 := -1;   (* the root of the dead list *)
  weakFree0 := -1;   (* the root of the free list *)

TYPE
  WeakRefAB = RECORD
    a: BITS 32 FOR INTEGER;
    b: BITS 32 FOR Word.T;
  END;
  WeakEntry = RECORD
    t   : WeakRefAB;
    r   : RefReferent;
    p   : WeakRefCleanUpProc;
    next: INTEGER;
  END;
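(* Illustrative sketch, not part of the original module: because the "a"
   field of a weak ref is the table index, a lookup needs no searching.
   Under "mutex", a test along the following lines decides whether a weak
   ref "w" is still live (WeakRefToRef below is the real version):

       WITH ab = LOOPHOLE (w, WeakRefAB) DO
         IF weakTable [ab.a].t = ab THEN
           (* live: weakTable [ab.a].r is the referent *)
         ELSE
           (* dead or recycled: the entry's "a" was set to -1, or its "b"
              counter has since been incremented, so the comparison fails *)
         END;
       END;
*)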
(* This is WeakRef.FromRef, which returns a new weak ref for an object. *)

PROCEDURE WeakRefFromRef (r: REFANY; p: WeakRefCleanUpProc := NIL): WeakRef =
  BEGIN
    LOCK mutex DO
      INC (collectionProhibited);
      (* create a WeakCleaner thread the first time through *)
      IF weakCleaner = NIL THEN
        weakCleaner := Thread.Fork (NEW (Thread.Closure, apply := WeakCleaner));
      END;
      (* if necessary, expand weakTable *)
      IF weakFree0 = -1 THEN
        VAR newTable := NEW (UNTRACED REF ARRAY OF WeakEntry,
                             2 * NUMBER (weakTable^) + 1);
        BEGIN
          SUBARRAY (newTable^, 0, NUMBER (weakTable^)) := weakTable^;
          FOR i := NUMBER (weakTable^) TO NUMBER (newTable^) - 1 DO
            WITH entry = newTable[i] DO
              entry.t.b := 0;
              entry.next := weakFree0;
              weakFree0 := i;
            END;
          END;
          weakTable := newTable;
        END;
      END;
      IF p # NIL THEN
        (* mark the object as having a weak ref with non-nil cleanup *)
        VAR header := HeaderOf (LOOPHOLE (r, ADDRESS));
        BEGIN
          <* ASSERT NOT header^.weak *>
          header^.weak := TRUE;
        END;
      END;
      (* allocate a new entry *)
      VAR i := weakFree0;
      BEGIN
        weakFree0 := weakTable[i].next;
        (* generate a new weak ref *)
        VAR t := WeakRefAB {a := i, b := Word.Plus (weakTable[i].t.b, 1)};
        BEGIN
          <* ASSERT t.b # 0 *>
          (* set up the entry *)
          weakTable[i] := WeakEntry {t := t,
                                     r := LOOPHOLE (r, RefReferent),
                                     p := p,
                                     next := weakLive0};
          weakLive0 := i;
          DEC (collectionProhibited);
          RETURN LOOPHOLE (t, WeakRef);
        END;
      END;
    END;
  END WeakRefFromRef;

(* This is WeakRef.ToRef, which inverts FromRef. *)

PROCEDURE WeakRefToRef (t: WeakRef): REFANY =
  BEGIN
    LOCK mutex DO
      INC (collectionProhibited);
      (* if the weak ref is not dead, we know the index *)
      WITH entry = weakTable[LOOPHOLE (t, WeakRefAB).a] DO
        VAR r: REFANY := NIL;
        BEGIN
          (* check the weak ref there *)
          IF entry.t = LOOPHOLE (t, WeakRefAB) THEN
            r := LOOPHOLE (entry.r, REFANY);
          END;
          DEC (collectionProhibited);
          RETURN r;
        END;
      END;
    END;
  END WeakRefToRef;

VAR
  weakCleaner  : Thread.T;
  weakCondition := NEW (Thread.Condition);

(* WeakCleaner waits for entries to be placed on the dead list, then cleans
   them up and puts them on the free list. *)

PROCEDURE WeakCleaner (<*UNUSED*> closure: Thread.Closure): REFANY =
  VAR
    i   : INTEGER;
    copy: WeakEntry;
  BEGIN
    LOOP
      (* get an entry to handle; copy its contents, then put it on the
         free list. *)
      LOCK mutex DO
        WHILE weakDead0 = -1 DO Thread.Wait (mutex, weakCondition); END;
        INC (collectionProhibited);
        i := weakDead0;
        WITH entry = weakTable[i] DO
          <* ASSERT entry.t.a = -1 *>
          copy := entry;
          weakDead0 := entry.next;
          entry.next := weakFree0;
          weakFree0 := i;
        END;
        DEC (collectionProhibited);
      END;
      (* call the registered procedure.  Note that collections are allowed;
         the copy is kept on the stack so the object won't be freed during
         the call. *)
      IF copy.p # NIL THEN
        copy.p (LOOPHOLE (WeakRefAB {a := i, b := copy.t.b}, WeakRef),
                LOOPHOLE (copy.r, REFANY));
      END;
      copy.r := NIL;   (* to help conservative collector *)
    END;
  END WeakCleaner;

(*---------------------------------------------------------------------------*)

VAR promoteBigObjects := FALSE;

BEGIN
  IF RTParams.IsPresent ("nogc")       THEN DisableCollection (); END;
  IF RTParams.IsPresent ("paranoidgc") THEN InstallSanityCheck (); END;
  promoteBigObjects := RTParams.IsPresent ("promotebig");
  PerfStart ();
END RTHeap.
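(* Illustrative client-side sketch, not part of this module.  It assumes the
   public interface is the usual "WeakRef" interface with T, FromRef, and
   ToRef, matching the comments above; the module and names below are
   hypothetical.

     MODULE WeakCacheExample;
     IMPORT WeakRef;

     VAR cached: WeakRef.T;

     PROCEDURE Remember (r: REFANY) =
       BEGIN
         cached := WeakRef.FromRef (r);   (* no cleanup procedure *)
       END Remember;

     PROCEDURE Recall (): REFANY =
       (* returns NIL once the remembered referent has been collected *)
       BEGIN
         RETURN WeakRef.ToRef (cached);
       END Recall;

     BEGIN
     END WeakCacheExample.
*)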