Reformatted Kernel files in Memory subdirectory.

Reformatted BlkPool.CC, HeapCtrl.CC, MAllocFree.CC, MemPag.CC, MemPhysical.CC, PageTables.CC.
This commit is contained in:
TomAwezome 2020-09-14 21:55:57 -04:00
parent ec3aa83b55
commit 7e0ce60071
8 changed files with 697 additions and 601 deletions

View file

@@ -136,4 +136,14 @@ StartOS.CC
FileSysFAT.CC
FileSysRedSea.CC
MakeBlkDev.CC
Memory/
BlkPool.CC
HeapCtrl.CC
MAllocFree.CC
MakeMemory.CC
MemPag.CC
MemPhysical.CC
PageTables.CC
$FG,7$-Tom$FG,0$

View file

@@ -1,73 +1,82 @@
U0 BlkPoolAdd(CBlkPool *bp,CMemBlk *m,I64 pags)
U0 BlkPoolAdd(CBlkPool *bp, CMemBlk *m, I64 pags)
{//Add mem to BlkPool.
if (sys_mem_init_flag)
MemSet(m,sys_mem_init_val,pags*MEM_PAG_SIZE);
MemSet(m, sys_mem_init_val, pags * MEM_PAG_SIZE);
PUSHFD
CLI
while (LBts(&bp->locked_flags,BPlf_LOCKED))
while (LBts(&bp->locked_flags, BPlf_LOCKED))
PAUSE
m->next=bp->mem_free_list;
m->pags=pags;
m->mb_signature=MBS_UNUSED_SIGNATURE_VAL;
bp->alloced_u8s+=pags<<MEM_PAG_BITS;
bp->mem_free_list=m;
LBtr(&bp->locked_flags,BPlf_LOCKED);
m->next = bp->mem_free_list;
m->pags = pags;
m->mb_signature = MBS_UNUSED_SIGNATURE_VAL;
bp->alloced_u8s += pags << MEM_PAG_BITS;
bp->mem_free_list = m;
LBtr(&bp->locked_flags, BPlf_LOCKED);
POPFD
}
U0 BlkPoolInit(CBlkPool *bp,I64 pags)
U0 BlkPoolInit(CBlkPool *bp, I64 pags)
{//Make mem chunk into a BlkPool.
I64 num;
CMemBlk *m;
MemSet(bp,0,sizeof(CBlkPool));
m=(bp(U8 *)+sizeof(CBlkPool)+MEM_PAG_SIZE-1)&~(MEM_PAG_SIZE-1);
num=(bp(U8 *)+pags<<MEM_PAG_BITS-m(U8 *))>>MEM_PAG_BITS;
bp->alloced_u8s=(pags-num)<<MEM_PAG_BITS; //Compensate before num added.
BlkPoolAdd(bp,m,num);
I64 num;
CMemBlk *m;
MemSet(bp, 0, sizeof(CBlkPool));
m = (bp(U8 *) + sizeof(CBlkPool) + MEM_PAG_SIZE - 1) & ~(MEM_PAG_SIZE - 1);
num = (bp(U8 *) + pags << MEM_PAG_BITS - m(U8 *)) >> MEM_PAG_BITS;
bp->alloced_u8s = (pags-num) << MEM_PAG_BITS; //Compensate before num added.
BlkPoolAdd(bp, m, num);
}
U0 BlkPoolsInit()
{
I64 i,total,lo,hi,code_heap_limit;
CMemE820 *m20=MEM_E820;
Bool first=TRUE;
I64 i, total, lo, hi, code_heap_limit;
CMemE820 *m20 = MEM_E820;
Bool first = TRUE;
total=MemBIOSTotal;
total = MemBIOSTotal;
if (total<=0x80000000)
code_heap_limit=total;
else if (total<=0x100000000)
code_heap_limit=total/4;
if (total <= 0x80000000)
code_heap_limit = total;
else if (total <= 0x100000000)
code_heap_limit = total / 4;
else
code_heap_limit=0x80000000;
code_heap_limit = 0x80000000;
i=code_heap_limit-SYS_16MEG_AREA_LIMIT; //See $LK,"RLf_16MEG_SYS_CODE_BP",A="FF:::/Kernel/Memory/PageTables.CC,RLf_16MEG_SYS_CODE_BP"$
BlkPoolAdd(sys_code_bp,SYS_16MEG_AREA_LIMIT,i>>MEM_PAG_BITS);
mem_heap_limit=i+SYS_16MEG_AREA_LIMIT-1;
i = code_heap_limit - SYS_16MEG_AREA_LIMIT; //See $LK,"RLf_16MEG_SYS_CODE_BP",A="FF:::/Kernel/Memory/PageTables.CC,RLf_16MEG_SYS_CODE_BP"$
BlkPoolAdd(sys_code_bp, SYS_16MEG_AREA_LIMIT, i >> MEM_PAG_BITS);
mem_heap_limit = i + SYS_16MEG_AREA_LIMIT - 1;
if (code_heap_limit<total) {
while (m20->type) {
if (m20->type==MEM_E820t_USABLE) {
lo=m20->base;
hi=m20->base+m20->len;
if (lo<code_heap_limit) {
if (hi>code_heap_limit)
lo=code_heap_limit;
if (code_heap_limit<total)
{
while (m20->type)
{
if (m20->type == MEM_E820t_USABLE)
{
lo = m20->base;
hi = m20->base + m20->len;
if (lo<code_heap_limit)
{
if (hi > code_heap_limit)
lo = code_heap_limit;
else
hi=lo; //cancel
hi = lo; //cancel
}
if (code_heap_limit<=lo<hi) {
if (first) {
BlkPoolInit(lo,(hi-lo)>>MEM_PAG_BITS);
sys_data_bp=lo;
Fs->data_heap=HeapCtrlInit(,Fs,sys_data_bp);
first=FALSE;
} else
BlkPoolAdd(sys_data_bp,lo,(hi-lo)>>MEM_PAG_BITS);
if (code_heap_limit <= lo < hi)
{
if (first)
{
BlkPoolInit(lo, (hi - lo) >> MEM_PAG_BITS);
sys_data_bp = lo;
Fs->data_heap = HeapCtrlInit(, Fs, sys_data_bp);
first = FALSE;
}
else
BlkPoolAdd(sys_data_bp, lo, (hi - lo) >> MEM_PAG_BITS);
}
}
m20++;
}
}
LBts(&sys_run_level,RLf_FULL_HEAPS);
LBts(&sys_run_level, RLf_FULL_HEAPS);
}

View file

@@ -1,33 +1,37 @@
CHeapCtrl *HeapCtrlInit(CHeapCtrl *hc=NULL,CTask *task=NULL,CBlkPool *bp)
CHeapCtrl *HeapCtrlInit(CHeapCtrl *hc=NULL, CTask *task=NULL, CBlkPool *bp)
{//See $LK,"HeapLog",A="MN:HeapLog"$() for an example.
//Duplicated for $LK,"Zenith Task",A="FF:::/Kernel/KStart64.CC,CHeapCtrl.bp"$.
if (!hc)
hc=ZCAlloc(sizeof(CHeapCtrl));
hc->hc_signature=HEAP_CTRL_SIGNATURE_VAL;
hc->mem_task=task;
hc->bp=bp;
hc = ZCAlloc(sizeof(CHeapCtrl));
hc->hc_signature = HEAP_CTRL_SIGNATURE_VAL;
hc->mem_task = task;
hc->bp = bp;
QueueInit(&hc->next_mem_blk);
hc->last_mergable=NULL;
hc->next_um=hc->last_um=(&hc->next_um)(U8 *)-offset(CMemUsed.next);
hc->last_mergable = NULL;
hc->next_um = hc->last_um = (&hc->next_um)(U8 *) - offset(CMemUsed.next);
return hc;
}
U0 HeapCtrlDel(CHeapCtrl *hc)
{//Free all blks alloced to a HeapCtrl.
CMemBlk *m,*m1;
if (hc) {
CMemBlk *m, *m1;
if (hc)
{
PUSHFD
CLI
while (LBts(&hc->locked_flags,HClf_LOCKED))
while (LBts(&hc->locked_flags, HClf_LOCKED))
PAUSE
m=hc->next_mem_blk;
while (m!=&hc->next_mem_blk) {
m1=m->next;
MemPagTaskFree(m,hc);
m=m1;
m = hc->next_mem_blk;
while (m != &hc->next_mem_blk)
{
m1 = m->next;
MemPagTaskFree(m, hc);
m = m1;
}
LBtr(&hc->locked_flags,HClf_LOCKED);
LBtr(&hc->locked_flags, HClf_LOCKED);
POPFD
Free(hc);
}
}

View file

@@ -4,213 +4,213 @@ asm {
_MALLOC::
// Throws 'OutMem'
PUSH RBP
MOV RBP,RSP
MOV RBP, RSP
PUSH RSI
PUSH RDI
XOR RBX,RBX
MOV RDX,U64 SF_ARG2[RBP]
TEST RDX,RDX
XOR RBX, RBX
MOV RDX, U64 SF_ARG2[RBP]
TEST RDX, RDX
JNZ @@05
MOV RDX,U64 FS:CTask.addr[RBX]
@@05: CMP U32 CTask.task_signature[RDX],TASK_SIGNATURE_VAL
MOV RDX, U64 FS:CTask.addr[RBX]
@@05: CMP U32 CTask.task_signature[RDX], TASK_SIGNATURE_VAL
#assert CTask.task_signature==CHeapCtrl.hc_signature //location signature same
#assert CTask.task_signature == CHeapCtrl.hc_signature //location signature same
JNE @@10
MOV RDX,U64 CTask.data_heap[RDX]
@@10: CMP U32 CHeapCtrl.hc_signature[RDX],HEAP_CTRL_SIGNATURE_VAL
MOV RDX, U64 CTask.data_heap[RDX]
@@10: CMP U32 CHeapCtrl.hc_signature[RDX], HEAP_CTRL_SIGNATURE_VAL
JE @@15
PUSH RDX
CALL &SysBadMAlloc
JMP I32 _SYS_HLT
@@15: MOV RAX,U64 SF_ARG1[RBP]
@@15: MOV RAX, U64 SF_ARG1[RBP]
PUSHFD
ADD RAX,CMemUsed.start+7 //round-up to I64
AND AL,0xF8
#assert CMemUsed.start>=sizeof(CMemUnused)
CMP RAX,CMemUsed.start
ADD RAX, CMemUsed.start + 7 //round-up to I64
AND AL, 0xF8
#assert CMemUsed.start >= sizeof(CMemUnused)
CMP RAX, CMemUsed.start
JAE @@20
MOV RAX,CMemUsed.start
MOV RAX, CMemUsed.start
@@20:
CLI
@@25: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED
@@25: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
PAUSE //don't know if this inst helps
JC @@25
CMP RAX,MEM_HEAP_HASH_SIZE
CMP RAX, MEM_HEAP_HASH_SIZE
JAE @@30
MOV RSI,U64 CHeapCtrl.heap_hash[RAX+RDX]
TEST RSI,RSI
MOV RSI, U64 CHeapCtrl.heap_hash[RAX + RDX]
TEST RSI, RSI
JZ @@35
MOV RCX,U64 CMemUnused.next[RSI]
MOV U64 CHeapCtrl.heap_hash[RAX+RDX],RCX
MOV RCX, U64 CMemUnused.next[RSI]
MOV U64 CHeapCtrl.heap_hash[RAX + RDX], RCX
JMP I32 MALLOC_ALMOST_DONE
//Big allocation
@@30: ADD RAX,sizeof(CMemBlk)+MEM_PAG_SIZE-1
SHR RAX,MEM_PAG_BITS
@@30: ADD RAX, sizeof(CMemBlk) + MEM_PAG_SIZE - 1
SHR RAX, MEM_PAG_BITS
PUSH RDX //preserve HeapCtrl
PUSH RDX
PUSH RAX
CALL &MemPagTaskAlloc
POP RDX
TEST RAX,RAX
TEST RAX, RAX
JZ @@45 //Out of memory
MOV RSI,RAX
MOV EAX,U32 CMemBlk.pags[RSI]
MOV RSI, RAX
MOV EAX, U32 CMemBlk.pags[RSI]
SHL RAX,MEM_PAG_BITS
SUB RAX,sizeof(CMemBlk)
ADD RSI,sizeof(CMemBlk)
SHL RAX, MEM_PAG_BITS
SUB RAX, sizeof(CMemBlk)
ADD RSI, sizeof(CMemBlk)
JMP I32 MALLOC_ALMOST_DONE
//Little allocation, chunk-off piece from free list chunks
@@35: LEA RSI,U64 CHeapCtrl.malloc_free_list-CMemUnused.next[RDX]
@@35: LEA RSI, U64 CHeapCtrl.malloc_free_list - CMemUnused.next[RDX]
@@40: MOV RBX,RSI
MOV RSI,U64 CMemUnused.next[RBX]
TEST RSI,RSI
@@40: MOV RBX, RSI
MOV RSI, U64 CMemUnused.next[RBX]
TEST RSI, RSI
JNZ I32 @@60
PUSH RAX //-**** save byte size
ADD RAX,16*MEM_PAG_SIZE-1
SHR RAX,MEM_PAG_BITS
ADD RAX, 16 * MEM_PAG_SIZE - 1
SHR RAX, MEM_PAG_BITS
PUSH RDX //preserve HeapCtrl
PUSH RDX
PUSH RAX
CALL &MemPagTaskAlloc
POP RDX
TEST RAX,RAX
TEST RAX, RAX
JNZ @@50
//Out of memory
@@45: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED
@@45: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD
PUSH TRUE
MOV RAX,'OutMem'
MOV RAX, 'OutMem'
PUSH RAX
CALL I32 &throw
JMP I32 MALLOC_FINAL_EXIT //Never gets here, hopefully.
@@50: MOV RSI,RAX
MOV EAX,U32 CMemBlk.pags[RSI]
SHL RAX,MEM_PAG_BITS
@@50: MOV RSI, RAX
MOV EAX, U32 CMemBlk.pags[RSI]
SHL RAX, MEM_PAG_BITS
//Can it be combined with last chunk? (Never Free these chunks.)
MOV RDI,U64 CHeapCtrl.last_mergable[RDX]
LEA RBX,U64 [RSI+RAX]
CMP RDI,RBX
MOV RDI, U64 CHeapCtrl.last_mergable[RDX]
LEA RBX, U64 [RSI + RAX]
CMP RDI, RBX
JNE @@55
PUSH RAX
MOV EAX,U32 CMemBlk.pags[RDI]
MOV EAX, U32 CMemBlk.pags[RDI]
ADD U32 CMemBlk.pags[RSI],EAX
//QueueRemove
MOV RAX,U64 CMemBlk.next[RDI]
MOV RBX,U64 CMemBlk.last[RDI]
MOV U64 CMemBlk.last[RAX],RBX
MOV U64 CMemBlk.next[RBX],RAX
MOV RAX, U64 CMemBlk.next[RDI]
MOV RBX, U64 CMemBlk.last[RDI]
MOV U64 CMemBlk.last[RAX], RBX
MOV U64 CMemBlk.next[RBX], RAX
POP RAX
@@55: MOV U64 CHeapCtrl.last_mergable[RDX],RSI
LEA RSI,U64 sizeof(CMemBlk)[RSI]
SUB RAX,sizeof(CMemBlk)
LEA RBX,U64 CHeapCtrl.malloc_free_list-CMemUnused.next[RDX]
MOV RDI,U64 CMemUnused.next[RBX]
MOV U64 CMemUnused.next[RSI],RDI
MOV U64 CMemUnused.size[RSI],RAX
MOV U64 CMemUnused.next[RBX],RSI
POP RAX //+****
@@55: MOV U64 CHeapCtrl.last_mergable[RDX], RSI
LEA RSI, U64 sizeof(CMemBlk)[RSI]
SUB RAX, sizeof(CMemBlk)
LEA RBX, U64 CHeapCtrl.malloc_free_list - CMemUnused.next[RDX]
MOV RDI, U64 CMemUnused.next[RBX]
MOV U64 CMemUnused.next[RSI], RDI
MOV U64 CMemUnused.size[RSI], RAX
MOV U64 CMemUnused.next[RBX], RSI
POP RAX //+****
JMP @@70
@@60: CMP U64 CMemUnused.size[RSI],RAX
@@60: CMP U64 CMemUnused.size[RSI], RAX
JB I32 @@40
JNE @@70
@@65: MOV RDI,U64 CMemUnused.next[RSI]
MOV U64 CMemUnused.next[RBX],RDI
@@65: MOV RDI, U64 CMemUnused.next[RSI]
MOV U64 CMemUnused.next[RBX], RDI
JMP MALLOC_ALMOST_DONE
@@70: SUB U64 CMemUnused.size[RSI],RAX //UPDATE FREE ENTRY
CMP U64 CMemUnused.size[RSI],sizeof(CMemUnused)
@@70: SUB U64 CMemUnused.size[RSI], RAX //UPDATE FREE ENTRY
CMP U64 CMemUnused.size[RSI], sizeof(CMemUnused)
JAE @@75 //take from top of block
ADD U64 CMemUnused.size[RSI],RAX //doesn't fit, undo
ADD U64 CMemUnused.size[RSI], RAX //doesn't fit, undo
JMP I32 @@40
@@75: ADD RSI,U64 CMemUnused.size[RSI]
@@75: ADD RSI, U64 CMemUnused.size[RSI]
MALLOC_ALMOST_DONE:
//RSI=res-CMemUsed.size
//RAX=size+CMemUsed.size
//RDX=HeapCtrl
ADD U64 CHeapCtrl.used_u8s[RDX],RAX
//RSI = res - CMemUsed.size
//RAX = size + CMemUsed.size
//RDX = HeapCtrl
ADD U64 CHeapCtrl.used_u8s[RDX], RAX
#if _CONFIG_HEAP_DEBUG
//QueueInsert
MOV RDI,U64 CHeapCtrl.last_um[RDX]
MOV U64 CMemUsed.next[RDI],RSI
MOV U64 CHeapCtrl.last_um[RDX],RSI
MOV U64 CMemUsed.last[RSI],RDI
LEA RDI,U64 CHeapCtrl.next_um-CMemUsed.next[RDX]
MOV U64 CMemUsed.next[RSI],RDI
MOV RDI, U64 CHeapCtrl.last_um[RDX]
MOV U64 CMemUsed.next[RDI], RSI
MOV U64 CHeapCtrl.last_um[RDX], RSI
MOV U64 CMemUsed.last[RSI], RDI
LEA RDI, U64 CHeapCtrl.next_um - CMemUsed.next[RDX]
MOV U64 CMemUsed.next[RSI], RDI
//Caller1/Caller2
PUSH RDX
MOV RDX,U64 [MEM_HEAP_LIMIT]
MOV RDI,U64 SF_RIP[RBP]
CMP RDI,RDX
MOV RDX, U64 [MEM_HEAP_LIMIT]
MOV RDI, U64 SF_RIP[RBP]
CMP RDI, RDX
JB @@80
XOR RDI,RDI
MOV U64 CMemUsed.caller1[RSI],RDI
XOR RDI, RDI
MOV U64 CMemUsed.caller1[RSI], RDI
JMP @@90
@@80: MOV U64 CMemUsed.caller1[RSI],RDI
MOV RDI,U64 SF_RBP[RBP]
CMP RDI,RDX
@@80: MOV U64 CMemUsed.caller1[RSI], RDI
MOV RDI, U64 SF_RBP[RBP]
CMP RDI, RDX
JB @@85
XOR RDI,RDI
XOR RDI, RDI
JMP @@90
@@85: MOV RDI,U64 SF_RIP[RDI]
CMP RDI,RDX
@@85: MOV RDI, U64 SF_RIP[RDI]
CMP RDI, RDX
JB @@90
XOR RDI,RDI
@@90: MOV U64 CMemUsed.caller2[RSI],RDI
XOR RDI, RDI
@@90: MOV U64 CMemUsed.caller2[RSI], RDI
POP RDX
#endif
LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED
BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD
MOV U64 CMemUsed.size[RSI],RAX
MOV U64 CMemUsed.hc[RSI],RDX
LEA RAX,U64 CMemUsed.start[RSI]
MOV U64 CMemUsed.size[RSI], RAX
MOV U64 CMemUsed.hc[RSI], RDX
LEA RAX, U64 CMemUsed.start[RSI]
TEST U8 [SYS_SEMAS+SEMA_HEAPLOG_ACTIVE*DEFAULT_CACHE_LINE_WIDTH],1
TEST U8 [SYS_SEMAS + SEMA_HEAPLOG_ACTIVE * DEFAULT_CACHE_LINE_WIDTH], 1
JZ @@105
PUSH RAX
PUSH RAX
MOV RAX,U64 [SYS_EXTERN_TABLE]
MOV RAX,U64 EXT_HEAPLOG_MALLOC*8[RAX]
TEST RAX,RAX
MOV RAX, U64 [SYS_EXTERN_TABLE]
MOV RAX, U64 EXT_HEAPLOG_MALLOC*8[RAX]
TEST RAX, RAX
JZ @@95
CALL RAX
JMP @@100
@@95: ADD RSP,8
@@100: POP RAX
@@95: ADD RSP, 8
@@100: POP RAX
@@105: TEST U8 [SYS_HEAP_INIT_FLAG],1
@@105: TEST U8 [SYS_HEAP_INIT_FLAG], 1
JZ MALLOC_FINAL_EXIT
PUSH RAX
MOV RCX,U64 CMemUsed.size-CMemUsed.start[RAX]
SUB RCX,CMemUsed.start
MOV RDI,RAX
MOV AL,U8 [SYS_HEAP_INIT_VAL]
MOV RCX, U64 CMemUsed.size - CMemUsed.start[RAX]
SUB RCX, CMemUsed.start
MOV RDI, RAX
MOV AL, U8 [SYS_HEAP_INIT_VAL]
REP_STOSB
POP RAX
@@ -219,34 +219,35 @@ MALLOC_FINAL_EXIT:
POP RSI
POP RBP
RET1 16
//************************************
_FREE::
//Be aware of $LK,"heap_hash",A="FF:::/Kernel/Memory/MAllocFree.CC,heap_hash"$ in $LK,"MemPagTaskAlloc",A="MN:MemPagTaskAlloc"$().
PUSH RBP
MOV RBP,RSP
MOV RBP, RSP
PUSH RSI
PUSH RDI
TEST U8 [SYS_SEMAS+SEMA_HEAPLOG_ACTIVE*DEFAULT_CACHE_LINE_WIDTH],1
TEST U8 [SYS_SEMAS + SEMA_HEAPLOG_ACTIVE * DEFAULT_CACHE_LINE_WIDTH], 1
JZ @@15
MOV RBX,U64 SF_ARG1[RBP]
TEST RBX,RBX
MOV RBX, U64 SF_ARG1[RBP]
TEST RBX, RBX
JZ @@05
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX
MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX
@@05: PUSH RBX
MOV RAX,U64 [SYS_EXTERN_TABLE]
MOV RAX,U64 EXT_HEAPLOG_FREE*8[RAX]
TEST RAX,RAX
ADD RBX, RAX
@@05: PUSH RBX
MOV RAX, U64 [SYS_EXTERN_TABLE]
MOV RAX, U64 EXT_HEAPLOG_FREE*8[RAX]
TEST RAX, RAX
JZ @@10
CALL RAX
JMP @@15
@@10: ADD RSP,8
@@10: ADD RSP, 8
@@15: MOV RSI,U64 SF_ARG1[RBP]
TEST RSI,RSI
@@15: MOV RSI, U64 SF_ARG1[RBP]
TEST RSI, RSI
#if _CONFIG_HEAP_DEBUG
JZ I32 FREE_DONE
@@ -254,192 +255,202 @@ _FREE::
JZ FREE_DONE
#endif
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RSI]
TEST RAX,RAX
MOV RAX, U64 CMemUsed.size - CMemUsed.start[RSI]
TEST RAX, RAX
JGE @@20 //Aligned alloced chunks have neg size.
//The neg size is offset to start of $LK,"CMemUsed",A="MN:CMemUsed"$ struct.
ADD RSI,RAX
//The neg size is offset to start of $LK,"CMemUsed",A="MN:CMemUsed"$ struct.
ADD RSI, RAX
@@20: PUSHFD
SUB RSI,CMemUsed.start
MOV RDX,U64 CMemUsed.hc[RSI]
CMP U32 CHeapCtrl.hc_signature[RDX],HEAP_CTRL_SIGNATURE_VAL
@@20: PUSHFD
SUB RSI, CMemUsed.start
MOV RDX, U64 CMemUsed.hc[RSI]
CMP U32 CHeapCtrl.hc_signature[RDX], HEAP_CTRL_SIGNATURE_VAL
JE @@25
ADD RSI,CMemUsed.start
ADD RSI, CMemUsed.start
PUSH RSI
CALL &SysBadFree
JMP I32 _SYS_HLT
@@25: MOV RAX,U64 CMemUsed.size[RSI]
SUB U64 CHeapCtrl.used_u8s[RDX],RAX
@@25: MOV RAX, U64 CMemUsed.size[RSI]
SUB U64 CHeapCtrl.used_u8s[RDX], RAX
CLI
@@30: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED
@@30: LOCK
BTS U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
PAUSE
JC @@30
#if _CONFIG_HEAP_DEBUG
//QueueRemove
MOV RDX,U64 CMemUsed.next[RSI]
MOV RDI,U64 CMemUsed.last[RSI]
MOV U64 CMemUsed.last[RDX],RDI
MOV U64 CMemUsed.next[RDI],RDX
MOV RDX, U64 CMemUsed.next[RSI]
MOV RDI, U64 CMemUsed.last[RSI]
MOV U64 CMemUsed.last[RDX], RDI
MOV U64 CMemUsed.next[RDI], RDX
//Caller1/Caller2
MOV RDX,U64 [MEM_HEAP_LIMIT]
MOV RDI,U64 SF_RIP[RBP]
CMP RDI,RDX
MOV RDX, U64 [MEM_HEAP_LIMIT]
MOV RDI, U64 SF_RIP[RBP]
CMP RDI, RDX
JB @@35
XOR RDI,RDI
MOV U64 CMemUnused.caller1[RSI],RDI
XOR RDI, RDI
MOV U64 CMemUnused.caller1[RSI], RDI
JMP @@45
@@35: MOV U64 CMemUnused.caller1[RSI],RDI
MOV RDI,U64 SF_RBP[RBP]
CMP RDI,RDX
@@35: MOV U64 CMemUnused.caller1[RSI], RDI
MOV RDI, U64 SF_RBP[RBP]
CMP RDI, RDX
JB @@40
XOR RDI,RDI
XOR RDI, RDI
JMP @@45
@@40: MOV RDI,U64 SF_RIP[RDI]
CMP RDI,RDX
@@40: MOV RDI, U64 SF_RIP[RDI]
CMP RDI, RDX
JB @@45
XOR RDI,RDI
@@45: MOV U64 CMemUnused.caller2[RSI],RDI
XOR RDI, RDI
@@45: MOV U64 CMemUnused.caller2[RSI], RDI
MOV RDX,U64 CMemUsed.hc[RSI]
MOV RDX, U64 CMemUsed.hc[RSI]
#endif
CMP RAX,MEM_HEAP_HASH_SIZE
CMP RAX, MEM_HEAP_HASH_SIZE
JAE @@50
#assert CMemUnused.size==CMemUsed.size
// MOV U64 CMemUnused.size[RSI],RAX
#assert CMemUnused.size == CMemUsed.size
// MOV U64 CMemUnused.size[RSI], RAX
MOV RBX,U64 CHeapCtrl.heap_hash[RAX+RDX]
MOV U64 CMemUnused.next[RSI],RBX
MOV U64 CHeapCtrl.heap_hash[RAX+RDX],RSI
MOV RBX, U64 CHeapCtrl.heap_hash[RAX + RDX]
MOV U64 CMemUnused.next[RSI], RBX
MOV U64 CHeapCtrl.heap_hash[RAX + RDX], RSI
JMP @@55
@@50: SUB RSI,sizeof(CMemBlk)
@@50: SUB RSI, sizeof(CMemBlk)
PUSH RDX
PUSH RDX
PUSH RSI
CALL &MemPagTaskFree
POP RDX
@@55: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX],HClf_LOCKED
@@55: LOCK
BTR U32 CHeapCtrl.locked_flags[RDX], HClf_LOCKED
POPFD
FREE_DONE:
POP RDI
POP RSI
POP RBP
RET1 8
//************************************
_MSIZE::
PUSH RBP
MOV RBP,RSP
MOV RBX,U64 SF_ARG1[RBP]
XOR RAX,RAX
TEST RBX,RBX
MOV RBP, RSP
MOV RBX, U64 SF_ARG1[RBP]
XOR RAX, RAX
TEST RBX, RBX
JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX
MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
@@05: SUB RAX,CMemUsed.start
@@10: POP RBP
ADD RBX, RAX
MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
@@05: SUB RAX, CMemUsed.start
@@10: POP RBP
RET1 8
//************************************
_MSIZE2::
PUSH RBP
MOV RBP,RSP
MOV RBX,U64 SF_ARG1[RBP]
XOR RAX,RAX
TEST RBX,RBX
MOV RBP, RSP
MOV RBX, U64 SF_ARG1[RBP]
XOR RAX, RAX
TEST RBX, RBX
JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX
MOV RAX, U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX
@@05: MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
@@10: POP RBP
ADD RBX, RAX
@@05: MOV RAX, U64 CMemUsed.size - CMemUsed.start[RBX]
@@10: POP RBP
RET1 8
//************************************
_MHEAP_CTRL::
PUSH RBP
MOV RBP,RSP
MOV RBX,U64 SF_ARG1[RBP]
XOR RAX,RAX
TEST RBX,RBX
MOV RBP, RSP
MOV RBX, U64 SF_ARG1[RBP]
XOR RAX, RAX
TEST RBX, RBX
JZ @@10
MOV RAX,U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX,RAX
MOV RAX, U64 CMemUsed.size-CMemUsed.start[RBX]
TEST RAX, RAX
JGE @@05 //Aligned alloced chunks have neg size
ADD RBX,RAX
@@05: MOV RAX,U64 CMemUsed.hc-CMemUsed.start[RBX]
@@10: POP RBP
ADD RBX, RAX
@@05: MOV RAX, U64 CMemUsed.hc - CMemUsed.start[RBX]
@@10: POP RBP
RET1 8
}
_extern _FREE U0 Free(U8 *addr); //Free $LK,"MAlloc",A="MN:MAlloc"$()ed memory chunk.
_extern _MSIZE I64 MSize(U8 *src); //Size of heap object.
_extern _MSIZE2 I64 MSize2(U8 *src); //Internal size of heap object.
_extern _MHEAP_CTRL CHeapCtrl *MHeapCtrl(U8 *src); //$LK,"CHeapCtrl",A="MN:CHeapCtrl"$ of object.
_extern _MALLOC U8 *MAlloc(I64 size,CTask *mem_task=NULL); //Alloc memory chunk.
_extern _FREE U0 Free(U8 *addr); //Free $LK,"MAlloc",A="MN:MAlloc"$()ed memory chunk.
_extern _MSIZE I64 MSize( U8 *src); //Size of heap object.
_extern _MSIZE2 I64 MSize2( U8 *src); //Internal size of heap object.
_extern _MHEAP_CTRL CHeapCtrl *MHeapCtrl(U8 *src); //$LK,"CHeapCtrl",A="MN:CHeapCtrl"$ of object.
_extern _MALLOC U8 *MAlloc(I64 size, CTask *mem_task=NULL); //Alloc memory chunk.
//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *ZMAlloc(I64 size)
{//Alloc memory in Zenith's heap.
return MAlloc(size,zenith_task);
return MAlloc(size, zenith_task);
}
U8 *CAlloc(I64 size,CTask *mem_task=NULL)
U8 *CAlloc(I64 size, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res=MAlloc(size,mem_task);
MemSet(res,0,size);
U8 *res = MAlloc(size, mem_task);
MemSet(res, 0, size);
return res;
}
U8 *ZCAlloc(I64 size)
{//Alloc and set to zero memory in Zenith's heap.
return CAlloc(size,zenith_task);
return CAlloc(size, zenith_task);
}
U8 *MAllocIdent(U8 *src,CTask *mem_task=NULL)
U8 *MAllocIdent(U8 *src, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res;
I64 size;
if (!src) return NULL;
size=MSize(src);
res=MAlloc(size,mem_task);
MemCopy(res,src,size);
if (!src)
return NULL;
size = MSize(src);
res = MAlloc(size, mem_task);
MemCopy(res, src, size);
return res;
}
U8 *ZMAllocIdent(U8 *src)
{//Alloc in Zenith's heap, ident copy of heap node.
return MAllocIdent(src,zenith_task);
return MAllocIdent(src, zenith_task);
}
U8 *MAllocAligned(I64 size,I64 alignment,
CTask *mem_task=NULL,I64 misalignment=0)
U8 *MAllocAligned(I64 size, I64 alignment, CTask *mem_task=NULL, I64 misalignment=0)
{//Only powers of two alignment. This is awful.
I64 mask=alignment-1;
U8 *ptr=MAlloc(size+mask+sizeof(I64)+misalignment,mem_task),
*res=(ptr+sizeof(I64)+mask)&~mask+misalignment;
res(I64 *)[-1]=ptr-res;
#assert offset(CMemUsed.size)==offset(CMemUsed.start)-sizeof(I64)
I64 mask = alignment - 1;
U8 *ptr = MAlloc(size + mask + sizeof(I64) + misalignment, mem_task),
*res = (ptr + sizeof(I64) + mask) & ~mask + misalignment;
res(I64 *)[-1] = ptr - res;
#assert offset(CMemUsed.size) == offset(CMemUsed.start) - sizeof(I64)
return res;
}
U8 *CAllocAligned(I64 size,I64 alignment,
CTask *mem_task=NULL,I64 misalignment=0)
U8 *CAllocAligned(I64 size, I64 alignment, CTask *mem_task=NULL, I64 misalignment=0)
{//Only powers of two alignment. This is awful.
I64 mask=alignment-1;
U8 *ptr=MAlloc(size+mask+sizeof(I64)+misalignment,mem_task),
*res=(ptr+sizeof(I64)+mask)&~mask+misalignment;
res(I64 *)[-1]=ptr-res;
#assert offset(CMemUsed.size)==offset(CMemUsed.start)-sizeof(I64)
MemSet(res,0,size);
I64 mask = alignment-1;
U8 *ptr = MAlloc(size + mask + sizeof(I64) + misalignment, mem_task),
*res = (ptr + sizeof(I64) + mask) & ~mask + misalignment;
res(I64 *)[-1] = ptr - res;
#assert offset(CMemUsed.size) == offset(CMemUsed.start) - sizeof(I64)
MemSet(res, 0, size);
return res;
}
@@ -449,18 +460,19 @@ U8 *ReAlloc(U8 *ptr, U64 new_size, CTask *mem_task=NULL)
//Useless for changing chunk sizes smaller than 8 bytes because MAlloc allocs 8 bytes at a time.
U8 *res;
if(!new_size)
if (!new_size)
{
Free(ptr); //we can free NULL
return NULL;
}
res = MAlloc(new_size, mem_task);
if(!ptr)
if (!ptr)
return res;
MemCopy(res, ptr, MinI64(MSize(ptr), new_size));
Free(ptr);
return res;
}
@@ -469,22 +481,26 @@ U8 *ZReAlloc(U8 *ptr, I64 new_size)
return ReAlloc(ptr, new_size, zenith_task);
}
U8 *StrNew(U8 *buf,CTask *mem_task=NULL)
U8 *StrNew(U8 *buf, CTask *mem_task=NULL)
{//Accepts a $LK,"CTask",A="MN:CTask"$ or $LK,"CHeapCtrl",A="MN:CHeapCtrl"$. NULL allocs off current task's heap.
U8 *res;
I64 size;
if (buf) {
size=StrLen(buf)+1;
res=MAlloc(size,mem_task);
MemCopy(res,buf,size);
} else {
res=MAlloc(1,mem_task);
*res=0;
if (buf)
{
size = StrLen(buf) + 1;
res = MAlloc(size, mem_task);
MemCopy(res, buf, size);
}
else
{
res = MAlloc(1, mem_task);
*res = 0;
}
return res;
}
U8 *ZStrNew(U8 *buf)
{//Alloc copy of string in Zenith's heap.
return StrNew(buf,zenith_task);
return StrNew(buf, zenith_task);
}

View file

@@ -1,166 +1,200 @@
U0 SysBadFree(I64 *ptr)
{
Panic("Bad Free:",ptr);
Panic("Bad Free:", ptr);
}
U0 SysBadMAlloc(I64 *ptr)
{
Panic("Bad MAlloc:",ptr);
Panic("Bad MAlloc:", ptr);
}
U8 *MemPagAlloc(I64 pags,CBlkPool *bp=NULL)
U8 *MemPagAlloc(I64 pags, CBlkPool *bp=NULL)
{/*Alloc pags from BlkPool. Don't link to task.
(Linking to a task means they will be freed when the task dies.)
It might give you more than you asked for.
Return: NULL if out of memory.
*/
CMemBlk *res=NULL,*m;
I64 i;
if (!bp) bp=sys_code_bp;
CMemBlk *res = NULL, *m;
I64 i;
if (!bp)
bp = sys_code_bp;
PUSHFD
CLI
while (LBts(&bp->locked_flags,BPlf_LOCKED))
while (LBts(&bp->locked_flags, BPlf_LOCKED))
PAUSE
if (pags<MEM_FREE_PAG_HASH_SIZE) {
if (res=bp->free_pag_hash[pags]) {
bp->free_pag_hash[pags]=res->next;
if (pags < MEM_FREE_PAG_HASH_SIZE)
{
if (res = bp->free_pag_hash[pags])
{
bp->free_pag_hash[pags] = res->next;
goto at_done;
}
i=Bsr(MEM_FREE_PAG_HASH_SIZE)+1;
i = Bsr(MEM_FREE_PAG_HASH_SIZE) + 1;
} else {
//We'll now round-up to a power of two.
//There is some overhead on allocations and
//we wouldn't want to round to the next
//power of two if a power of two was requested.
//So we use a little more than a power of two.
pags-=MEM_EXTRA_HASH2_PAGS;
i=Bsr(pags)+1;
pags=1<<i+MEM_EXTRA_HASH2_PAGS;
if (res=bp->free_pag_hash2[i]) {
bp->free_pag_hash2[i]=res->next;
pags -= MEM_EXTRA_HASH2_PAGS;
i = Bsr(pags) + 1;
pags = 1 << i + MEM_EXTRA_HASH2_PAGS;
if (res = bp->free_pag_hash2[i])
{
bp->free_pag_hash2[i] = res->next;
goto at_done;
}
}
m=&bp->mem_free_list;
while (TRUE) {
if (!(res=m->next)) {
m = &bp->mem_free_list;
while (TRUE)
{
if (!(res = m->next))
{
//We're probably out of luck, but lets search for a
//freed larger size block... and, screw-it, return the whole thing.
do {
if (res=bp->free_pag_hash2[++i]) {
pags=1<<i+MEM_EXTRA_HASH2_PAGS;
bp->free_pag_hash2[i]=res->next;
do
{
if (res = bp->free_pag_hash2[++i])
{
pags = 1 << i + MEM_EXTRA_HASH2_PAGS;
bp->free_pag_hash2[i] = res->next;
goto at_done;
}
} while (i<64-MEM_PAG_BITS-1);
pags=0;
res=NULL; //Out of memory
}
while (i < 64 - MEM_PAG_BITS - 1);
pags = 0;
res = NULL; //Out of memory
goto at_done2;
}
if (res->pags<pags)
m=res;
else {
if (res->pags==pags) {
m->next=res->next;
if (res->pags < pags)
m = res;
else
{
if (res->pags == pags)
{
m->next = res->next;
goto at_done;
} else {
res->pags-=pags;
res(U8 *)+=res->pags<<MEM_PAG_BITS;
res->pags=pags;
}
else
{
res->pags -= pags;
res(U8 *) += res->pags << MEM_PAG_BITS;
res->pags = pags;
goto at_done;
}
}
}
at_done:
bp->used_u8s+=res->pags<<MEM_PAG_BITS;
bp->used_u8s += res->pags << MEM_PAG_BITS;
at_done2:
LBtr(&bp->locked_flags,BPlf_LOCKED);
LBtr(&bp->locked_flags, BPlf_LOCKED);
POPFD
return res;
}
U0 MemPagFree(CMemBlk *m,CBlkPool *bp=NULL)
U0 MemPagFree(CMemBlk *m, CBlkPool *bp=NULL)
{//Return non-task pags to BlkPool.
I64 i,pags;
if (m) {
if (!bp) bp=sys_code_bp;
I64 i, pags;
if (m)
{
if (!bp)
bp = sys_code_bp;
PUSHFD
CLI
while (LBts(&bp->locked_flags,BPlf_LOCKED))
while (LBts(&bp->locked_flags, BPlf_LOCKED))
PAUSE
pags=m->pags;
m->mb_signature=MBS_UNUSED_SIGNATURE_VAL;
bp->used_u8s-=pags<<MEM_PAG_BITS;
if (pags<MEM_FREE_PAG_HASH_SIZE) {
m->next=bp->free_pag_hash[pags];
bp->free_pag_hash[pags]=m;
} else {
pags = m->pags;
m->mb_signature = MBS_UNUSED_SIGNATURE_VAL;
bp->used_u8s -= pags << MEM_PAG_BITS;
if (pags < MEM_FREE_PAG_HASH_SIZE)
{
m->next = bp->free_pag_hash[pags];
bp->free_pag_hash[pags] = m;
}
else
{
//We'll now round-up to a power of two.
//There is some overhead on allocations and
//we wouldn't want to round to the next
//power of two if a power of two was requested.
//So we use a little more than a power of two.
pags-=MEM_EXTRA_HASH2_PAGS;
i=Bsr(pags);
m->next=bp->free_pag_hash2[i];
bp->free_pag_hash2[i]=m;
pags -= MEM_EXTRA_HASH2_PAGS;
i = Bsr(pags);
m->next = bp->free_pag_hash2[i];
bp->free_pag_hash2[i] = m;
}
LBtr(&bp->locked_flags,BPlf_LOCKED);
LBtr(&bp->locked_flags, BPlf_LOCKED);
POPFD
}
}
CMemBlk *MemPagTaskAlloc(I64 pags,CHeapCtrl *hc)
CMemBlk *MemPagTaskAlloc(I64 pags, CHeapCtrl *hc)
{/*hc must be locked. Don't preempt this routine.
Currently, this is only called from $LK,"MAlloc",A="MN:MAlloc"$().
Return: NULL if out of memory.
*/
CMemBlk *res;
I64 threshold,count,size;
CMemUnused *uum,**_uum,**_ptr;
if (res=MemPagAlloc(pags,hc->bp)) {
QueueInsert(res,hc->last_mem_blk);
res->mb_signature=MBS_USED_SIGNATURE_VAL;
hc->alloced_u8s+=res->pags<<MEM_PAG_BITS;
CMemBlk *res;
I64 threshold, count, size;
CMemUnused *uum, **_uum, **_ptr;
if (res = MemPagAlloc(pags, hc->bp))
{
QueueInsert(res, hc->last_mem_blk);
res->mb_signature = MBS_USED_SIGNATURE_VAL;
hc->alloced_u8s += res->pags << MEM_PAG_BITS;
//Tidy-up free list (Move into heap hash)
//because if free list gets long, delay causes crash.
threshold=MEM_HEAP_HASH_SIZE>>4;
#assert MEM_HEAP_HASH_SIZE>>4>=sizeof(U8 *)
do {
count=0;
_uum=&hc->malloc_free_list;
while (uum=*_uum) {
threshold = MEM_HEAP_HASH_SIZE >> 4;
#assert MEM_HEAP_HASH_SIZE >> 4 >= sizeof(U8 *)
do
{
count = 0;
_uum = &hc->malloc_free_list;
while (uum = *_uum)
{
#assert !offset(CMemUnused.next)
size=uum->size;
if (size<threshold) {
*_uum=uum->next;
_ptr=(&hc->heap_hash)(U8 *)+size;
uum->next=*_ptr;
*_ptr=uum;
} else {
size = uum->size;
if (size < threshold)
{
*_uum = uum->next;
_ptr = (&hc->heap_hash)(U8 *) + size;
uum->next = *_ptr;
*_ptr = uum;
}
else
{
count++;
_uum=uum;
_uum = uum;
}
}
threshold<<=1;
} while (count>8 && threshold<=MEM_HEAP_HASH_SIZE);
threshold <<= 1;
}
while (count > 8 && threshold <= MEM_HEAP_HASH_SIZE);
}
return res;
}
U0 MemPagTaskFree(CMemBlk *m,CHeapCtrl *hc)
U0 MemPagTaskFree(CMemBlk *m, CHeapCtrl *hc)
{//hc must be locked
if (m) {
if (m)
{
PUSHFD
CLI
if (m->mb_signature!=MBS_USED_SIGNATURE_VAL)
if (m->mb_signature != MBS_USED_SIGNATURE_VAL)
SysBadFree(m);
else {
else
{
QueueRemove(m);
hc->alloced_u8s-=m->pags<<MEM_PAG_BITS;
MemPagFree(m,hc->bp);
hc->alloced_u8s -= m->pags << MEM_PAG_BITS;
MemPagFree(m, hc->bp);
}
POPFD
}

View file

@ -1,53 +1,59 @@
Bool Mem32DevIns(CMemRange *tmpmr)
{
CMemRange *tmpmr1=dev.mem32_head.next,*tmpmr2;
while (tmpmr1!=&dev.mem32_head) {
if (!tmpmr1->type && tmpmr->base>=tmpmr1->base &&
tmpmr->base+tmpmr->size<=tmpmr1->base+tmpmr1->size) {
if (tmpmr->base>tmpmr1->base) {
tmpmr2=ZMAlloc(sizeof(CMemRange));
tmpmr2->type=MRT_UNUSED;
tmpmr2->flags=0;
tmpmr2->base=tmpmr1->base;
tmpmr2->size=tmpmr->base-tmpmr1->base;
QueueInsertRev(tmpmr2,tmpmr1);
CMemRange *tmpmr1 = dev.mem32_head.next, *tmpmr2;
while (tmpmr1 != &dev.mem32_head)
{
if (!tmpmr1->type && tmpmr->base >= tmpmr1->base && tmpmr->base + tmpmr->size <= tmpmr1->base + tmpmr1->size)
{
if (tmpmr->base > tmpmr1->base)
{
tmpmr2 = ZMAlloc(sizeof(CMemRange));
tmpmr2->type = MRT_UNUSED;
tmpmr2->flags = 0;
tmpmr2->base = tmpmr1->base;
tmpmr2->size = tmpmr->base - tmpmr1->base;
QueueInsertRev(tmpmr2, tmpmr1);
}
QueueInsertRev(tmpmr,tmpmr1);
tmpmr1->size=tmpmr1->base+tmpmr1->size-
(tmpmr->base+tmpmr->size);
tmpmr1->base=tmpmr->base+tmpmr->size;
if (!tmpmr1->size) {
QueueInsertRev(tmpmr, tmpmr1);
tmpmr1->size = tmpmr1->base + tmpmr1->size - (tmpmr->base + tmpmr->size);
tmpmr1->base = tmpmr->base + tmpmr->size;
if (!tmpmr1->size)
{
QueueRemove(tmpmr1);
Free(tmpmr1);
}
return TRUE;
}
tmpmr1=tmpmr1->next;
tmpmr1 = tmpmr1->next;
}
return FALSE;
}
U0 Mem32DevInit()
{
CMemRange *tmpmr;
CMemE820 *m20=MEM_E820;
CMemE820 *m20 = MEM_E820;
QueueInit(&dev.mem32_head);
tmpmr=ZMAlloc(sizeof(CMemRange));
tmpmr->type=MRT_UNUSED;
tmpmr->flags=0;
tmpmr = ZMAlloc(sizeof(CMemRange));
tmpmr->type = MRT_UNUSED;
tmpmr->flags = 0;
//Maybe !!! Change this to 0xF0000000 !!!
tmpmr->base=0xE0000000;
tmpmr->size=0x10000000;
QueueInsert(tmpmr,dev.mem32_head.last);
tmpmr->base = 0xE0000000;
tmpmr->size = 0x10000000;
QueueInsert(tmpmr, dev.mem32_head.last);
if (m20->type) {
while (m20->type) {
tmpmr=ZMAlloc(sizeof(CMemRange));
tmpmr->type=m20->type;
tmpmr->flags=0;
tmpmr->base=m20->base;
tmpmr->size=m20->len;
if (m20->type)
{
while (m20->type)
{
tmpmr = ZMAlloc(sizeof(CMemRange));
tmpmr->type = m20->type;
tmpmr->flags = 0;
tmpmr->base = m20->base;
tmpmr->size = m20->len;
if (!Mem32DevIns(tmpmr))
Free(tmpmr);
m20++;
@@ -55,102 +61,118 @@ U0 Mem32DevInit()
}
}
U8 *Mem32DevAlloc(I64 size, I64 alignment)
{//Alloc 32-bit addr space for device. (Doesn't work.) Not used.
//For this to work the BIOS E820 map must be searched for gaps in
//the 32-bit range and the pool initialized to the gaps.
//Returns base of the claimed range, or NULL on failure.
	U8 *base, *limit;
	CMemRange *tmpmr, *tmpmr1;

	while (LBts(&sys_semas[SEMA_DEV_MEM], 0)) //Spin-lock the device-mem range list.
		Yield;
	tmpmr1 = dev.mem32_head.next;
	while (tmpmr1 != &dev.mem32_head)
	{//Scan for an unused range that can hold size bytes at the requested alignment.
		base = (tmpmr1->base + alignment - 1) & ~(alignment - 1); //Round up to alignment.
		limit = base + size - 1;
		if (!tmpmr1->type && limit < tmpmr1->base + tmpmr1->size)
		{
			tmpmr = ZMAlloc(sizeof(CMemRange));
			tmpmr->type = MRT_DEV;
			tmpmr->flags = 0;
			tmpmr->base = base;
			tmpmr->size = size;
			if (!Mem32DevIns(tmpmr))
			{//Insertion failed; release the node and the lock.
				Free(tmpmr);
				LBtr(&sys_semas[SEMA_DEV_MEM], 0);
				return NULL;
			}
			LBtr(&sys_semas[SEMA_DEV_MEM], 0);
			return tmpmr->base;
		}
		tmpmr1 = tmpmr1->next;
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
	return NULL;
}
U0 Mem32DevFree(U8 *base)
{//Free 32-bit device address space.
//NULL is accepted and ignored. Marks the matching range unused; does not coalesce.
	CMemRange *tmpmr;

	if (!base)
		return;
	while (LBts(&sys_semas[SEMA_DEV_MEM], 0)) //Spin-lock the device-mem range list.
		Yield;
	tmpmr = dev.mem32_head.next;
	while (tmpmr != &dev.mem32_head)
	{
		if (tmpmr->base == base)
		{
			tmpmr->type = MRT_UNUSED;
			break;
		}
		tmpmr = tmpmr->next;
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
}
U8 *Mem64DevAlloc(I64 *_pages1Gig)
{//Alloc 64-bit addr space for device.
//Carves *_pages1Gig one-Gig pages downward from dev.mem64_ptr and marks each
//page-table entry uncached+present. Returns the new (lowered) dev.mem64_ptr.
	U8 *a;
	I64 i = *_pages1Gig, *pte;

	while (LBts(&sys_semas[SEMA_DEV_MEM], 0)) //Spin-lock the device-mem allocator.
		Yield;
	while (i--)
	{
		a = dev.mem64_ptr -= 1 << 30; //Move allocation pointer down one Gig.
		do
		{
			pte = MemPageTable(a);
			*pte = *pte & ~0x18 | 0x11; //Uncached and present
			//NOTE(review): invalidates dev.mem64_ptr each pass rather than a — confirm intended.
			InvalidatePage(dev.mem64_ptr);
			a += mem_page_size;
		}
		while (a - dev.mem64_ptr < 1 << 30);
	}
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
	return dev.mem64_ptr;
}
U0 Mem64DevFree(U8 *base, I64 pages1Gig)
{//Free 64-bit device address space.
//Only the most recent allocation can be reclaimed (stack-style allocator):
//freeing any other base is silently ignored. NULL is accepted and ignored.
	if (!base)
		return;
	while (LBts(&sys_semas[SEMA_DEV_MEM], 0)) //Spin-lock the device-mem allocator.
		Yield;
	if (base == dev.mem64_ptr)
		dev.mem64_ptr += pages1Gig * 1 << 30;
	//else not freed
	LBtr(&sys_semas[SEMA_DEV_MEM], 0);
}
U0 UncachedAliasAlloc() //Make uncached alias for 4 lowest Gig.
{//Maps 4 Gig of device address space so that dev.uncached_alias+x aliases
//physical x with caching disabled (PTE value 0x197 plus the physical offset).
	I64 i = 4, *pte;
	U8 *a;

	a = dev.uncached_alias = Mem64DevAlloc(&i);
	do
	{
		pte = MemPageTable(a);
		*pte = 0x197 + a - dev.uncached_alias; //Phys addr with uncached+present flag bits.
		InvalidatePage(a);
		a += mem_page_size;
	}
	while (a - dev.uncached_alias < 1 << 32);
}
I64 MemBIOSTotal()
{//Returns max of either E801 or E820 mem map.
I64 total01 = 0x100000, total20 = 0;
I64 total01 = 0x100000, total20 = 0;
U16 *mem01 = MEM_E801;
CMemE820 *mem20 = MEM_E820;
@ -159,28 +181,29 @@ I64 MemBIOSTotal()
if (mem20->type)
{
while(mem20->type)
while (mem20->type)
{
if(mem20->type == MEM_E820t_USABLE)
total20 += mem20->len;
mem20++;
}
}
}
return MaxI64(total01, total20);
}
I64 Scale2Mem(I64 min, I64 max, I64 limit=2*1024*1024*1024)
{//Helps pick DiskCache and RAMDisk sizes.
//Can be used in $LK,"BootHDIns",A="MN:BootHDIns"$() config scripts.
//Interpolates linearly from min to max as installed heap mem approaches limit.
	I64 i;

	if (sys_data_bp) //Prefer the data heap's size when a separate data pool exists.
		i = sys_data_bp->alloced_u8s;
	else
		i = sys_code_bp->alloced_u8s;
	if (i >= limit)
		return max;
	else
		return min + (max - min) * i / limit;
}
I64 Seg2Linear(U32 *ptr)

View file

@ -4,202 +4,202 @@ asm {
USE32
SYS_INIT_PAGE_TABLES::
//Build identity-mapped page tables covering physical space plus a 4Gig
//uncached alias, using 1Gig pages when CPUID reports the capability,
//else 2Meg pages. Runs in 32-bit mode during early boot.
//Check 1Gig page capability and set page size.
	MOV	EAX, 0x80000001
	CPUID
	MOV	EAX, 1 << 21
	BT	EDX, 26			//CPUID Fn8000_0001 EDX bit 26 = 1Gig pages.
	JNC	@@05
	MOV	EAX, 1 << 30
@@05:	MOV	U32 [MEM_PAGE_SIZE], EAX
//Set mapped space limit
	MOV	EAX, [MEM_PHYSICAL_SPACE]
	MOV	EDX, [MEM_PHYSICAL_SPACE + 4]
	BT	U32 [MEM_PAGE_SIZE], 30 //Round-up to 1Gig boundary?
	JNC	@@10
	ADD	EAX, 0x3FFFFFFF
	ADC	EDX, 0
	AND	EAX, ~0x3FFFFFFF
@@10:	INC	EDX //Need 4Gig extra for uncached alias up at top of space.
	MOV	[MEM_MAPPED_SPACE], EAX
	MOV	[MEM_MAPPED_SPACE + 4], EDX
//How many 2Meg pages?
	MOV	CL, 21
	ADD	EAX, 0x1FFFFF
	ADC	EDX, 0
	SHRD	EAX, EDX
	SHR	EDX, CL
	MOV	[MEM_2MEG_NUM], EAX
	MOV	[MEM_2MEG_NUM + 4], EDX
//How many 1Gig pages?
	MOV	CL, 9
	ADD	EAX, 0x1FF
	ADC	EDX, 0
	SHRD	EAX, EDX
	SHR	EDX, CL
	MOV	[MEM_1GIG_NUM], EAX
	MOV	[MEM_1GIG_NUM + 4], EDX
//How many 512Gig pages?
	MOV	CL, 9
	ADD	EAX, 0x1FF
	ADC	EDX, 0
	SHRD	EAX, EDX
	SHR	EDX, CL
	MOV	[MEM_512GIG_NUM], EAX
	MOV	[MEM_512GIG_NUM + 4], EDX
//Set $LK,"CSysFixedArea",A="MN:CSysFixedArea"$ to zero
	MOV	EDI, SYS_FIXED_AREA
	XOR	EAX, EAX
	MOV	ECX, sizeof(CSysFixedArea) / 4
	REP_STOSD
	MOV	U32 [MEM_PML2], EDI	//Tables are laid out consecutively after the fixed area.
//Check for 1Gig page capability.
	BT	U32 [MEM_PAGE_SIZE], 30
	JC	@@15			//1Gig pages need no PML2 level.
//Find PML2 Size
	MOV	EAX, U32 [MEM_2MEG_NUM]
	ADD	EAX, 0x1FF
	AND	EAX, ~0x1FF		//Round entry count to a full 512-entry table.
	SHL	EAX, 3			//8 bytes per entry.
	ADD	EDI, EAX
//Find PML3 Size
@@15:	MOV	U32 [MEM_PML3], EDI
	MOV	EAX, U32 [MEM_1GIG_NUM]
	ADD	EAX, 0x1FF
	AND	EAX, ~0x1FF
	SHL	EAX, 3
	ADD	EDI, EAX
//Find PML4 Size
	MOV	U32 [MEM_PML4], EDI
	MOV	EAX, U32 [MEM_512GIG_NUM]
	ADD	EAX, 0x1FF
	AND	EAX, ~0x1FF
	SHL	EAX, 3
	ADD	EAX, EDI
	MOV	U32 [MEM_HEAP_BASE], EAX //Heap begins just past the last table.
//Set page tables to zero
	MOV	EDI, U32 [MEM_PML2]
	SUB	EAX, EDI
	MOV	ECX, EAX
	SHR	ECX, 2
	XOR	EAX, EAX
	REP_STOSD
//Check for 1Gig page capability.
	BT	U32 [MEM_PAGE_SIZE], 30
	JC	@@30
//PML2: Use 2Meg Pages
	MOV	EAX, 0x87 //bit 7 is page size (2Meg)
	XOR	EDX, EDX
	MOV	EDI, [MEM_PML2]
	MOV	ECX, [MEM_2MEG_NUM]
@@20:	MOV	U32 [EDI], EAX
	ADD	EDI, 4
	MOV	U32 [EDI], EDX
	ADD	EDI, 4
	ADD	EAX, 0x200000
	ADC	EDX, 0
	LOOP	@@20
//PML3: Use 2Meg Pages
	MOV	EAX, [MEM_PML2]
	OR	EAX, 7			//Present, writable, user.
	XOR	EDX, EDX
	MOV	EDI, [MEM_PML3]
	MOV	ECX, [MEM_1GIG_NUM]
@@25:	MOV	U32 [EDI], EAX
	ADD	EDI, 4
	MOV	U32 [EDI], EDX
	ADD	EDI, 4
	ADD	EAX, 0x1000		//Next 4K PML2 table.
	ADC	EDX, 0
	LOOP	@@25
	JMP	@@40
//PML3: Use 1Gig Pages
@@30:	MOV	EAX, 0x87 //bit 7 is page size (1Gig)
	XOR	EDX, EDX
	MOV	EDI, [MEM_PML3]
	MOV	ECX, [MEM_1GIG_NUM]
@@35:	MOV	U32 [EDI], EAX
	ADD	EDI, 4
	MOV	U32 [EDI], EDX
	ADD	EDI, 4
	ADD	EAX, 0x40000000
	ADC	EDX, 0
	LOOP	@@35
//PML4
@@40:	MOV	EAX, [MEM_PML3]
	OR	EAX, 7
	XOR	EDX, EDX
	MOV	EDI, [MEM_PML4]
	MOV	ECX, [MEM_512GIG_NUM]
@@45:	MOV	U32 [EDI], EAX
	ADD	EDI, 4
	MOV	U32 [EDI], EDX
	ADD	EDI, 4
	ADD	EAX, 0x1000		//Next 4K PML3 table.
	ADC	EDX, 0
	LOOP	@@45
	RET

SYS_INIT_16MEG_SYS_CODE_BP::
// Init sys_code_bp to BIOS E801 lowest 16Meg val.
// $LK,"BlkPoolsInit",A="MN:BlkPoolsInit"$() adds the rest.
	MOV	U32 [SYS_CODE_BP], SYS_FIXED_AREA + CSysFixedArea.sys_code_bp
	MOV	U32 [SYS_CODE_BP + 4], 0
	MOV	U32 [SYS_DATA_BP], 0
	MOV	U32 [SYS_DATA_BP + 4], 0
	XOR	EAX, EAX
	MOV	AX, U16 [MEM_E801] //1 Kb blks between 1M and 16M
	SHL	EAX, 10
	ADD	EAX, 0x100000
	MOV	EDI, U32 [MEM_HEAP_BASE]
	SUB	EAX, EDI
//EDI=BASE EAX=SIZE
	TEST	U8 [SYS_MEM_INIT_FLAG], 1
	JZ	@@05
	PUSH	EAX			//Optionally pattern-fill the heap region.
	PUSH	EDI
	MOV	ECX, EAX
	MOV	AL, U8 [SYS_MEM_INIT_VAL]
	REP_STOSB
	POP	EDI
	POP	EAX
@@05:	SHR	EAX, MEM_PAG_BITS
	MOV	ESI, SYS_FIXED_AREA + CSysFixedArea.sys_code_bp
	MOV	EBX, U32 CBlkPool.mem_free_list[ESI]
	MOV	U32 CMemBlk.next [EDI], EBX	//Link blk onto pool's free list.
	MOV	U32 CMemBlk.next + 4 [EDI], 0
	MOV	U32 CBlkPool.mem_free_list [ESI], EDI
	MOV	U32 CBlkPool.mem_free_list + 4 [ESI], 0
	MOV	U32 CMemBlk.mb_signature[EDI], MBS_UNUSED_SIGNATURE_VAL
	MOV	U32 CMemBlk.pags[EDI], EAX
	SHL	EAX, MEM_PAG_BITS
	ADD	U32 CBlkPool.alloced_u8s[ESI], EAX
	BTS	U32 [SYS_RUN_LEVEL], RLf_16MEG_SYS_CODE_BP
	RET
}
I64 *MemPageTable(U8 *a)
{//Point to page table entry for addr.
//Picks the PML3 table when 1Gig pages are in use, else the PML2 table.
	if (Bt(&mem_page_size, 30))
		return *MEM_PML3(U64 *) + a >> 30 * 8; //8-byte entry per 1Gig page.
	else
		return *MEM_PML2(U64 *) + a >> 21 * 8; //8-byte entry per 2Meg page.
}