
Commit 2c5f7ab

initproc, usegment, swtch tweaks
1 parent b121486 commit 2c5f7ab

4 files changed, 46 insertions(+), 60 deletions(-)

defs.h

+2 -3
@@ -52,7 +52,7 @@ int writei(struct inode*, char*, uint, uint);
 // ide.c
 void ideinit(void);
 void ideintr(void);
-void iderw(struct buf *);
+void iderw(struct buf*);
 
 // ioapic.c
 void ioapicenable(int irq, int cpu);
@@ -109,7 +109,7 @@ void wakeup(void*);
 void yield(void);
 
 // swtch.S
-void swtch(struct context**, struct context**);
+void swtch(struct context**, struct context*);
 
 // spinlock.c
 void acquire(struct spinlock*);
@@ -151,7 +151,6 @@ void uartinit(void);
 void uartintr(void);
 void uartputc(int);
 
-
 // number of elements in fixed-size array
 #define NELEM(x) (sizeof(x)/sizeof((x)[0]))
 

proc.c

+39 -46
@@ -15,7 +15,7 @@ static struct proc *initproc;
 
 int nextpid = 1;
 extern void forkret(void);
-extern void forkret1(struct trapframe*);
+extern void trapret(void);
 
 void
 pinit(void)
@@ -30,31 +30,39 @@ static struct proc*
 allocproc(void)
 {
   struct proc *p;
+  char *sp;
 
   acquire(&ptable.lock);
-  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
-    if(p->state == UNUSED){
-      p->state = EMBRYO;
-      p->pid = nextpid++;
+  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
+    if(p->state == UNUSED)
       goto found;
-    }
-  }
   release(&ptable.lock);
   return 0;
 
 found:
+  p->state = EMBRYO;
+  p->pid = nextpid++;
   release(&ptable.lock);
 
   // Allocate kernel stack if necessary.
   if((p->kstack = kalloc(KSTACKSIZE)) == 0){
     p->state = UNUSED;
     return 0;
   }
-  p->tf = (struct trapframe*)(p->kstack + KSTACKSIZE) - 1;
-
-  // Set up new context to start executing at forkret (see below).
-  p->context = (struct context *)p->tf - 1;
-  memset(p->context, 0, sizeof(*p->context));
+  sp = p->kstack + KSTACKSIZE;
+
+  // Leave room for trap frame.
+  sp -= sizeof *p->tf;
+  p->tf = (struct trapframe*)sp;
+
+  // Set up new context to start executing at forkret,
+  // which returns to trapret (see below).
+  sp -= 4;
+  *(uint*)sp = (uint)trapret;
+
+  sp -= sizeof *p->context;
+  p->context = (struct context*)sp;
+  memset(p->context, 0, sizeof *p->context);
   p->context->eip = (uint)forkret;
   return p;
 }
@@ -79,19 +87,16 @@ growproc(int n)
 }
 
 // Set up CPU's kernel segment descriptors.
+// Run once at boot time on each CPU.
 void
 ksegment(void)
 {
   struct cpu *c1;
 
   c1 = &cpus[cpu()];
-  c1->gdt[0] = SEG_NULL;
   c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
   c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
-  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)&c1->tls+sizeof(c1->tls), 0xffffffff, 0);
-  c1->gdt[SEG_UCODE] = SEG_NULL;
-  c1->gdt[SEG_UDATA] = SEG_NULL;
-  c1->gdt[SEG_TSS] = SEG_NULL;
+  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c1->tls+1), 0xffffffff, 0);
   lgdt(c1->gdt, sizeof(c1->gdt));
   loadfsgs(SEG_KCPU << 3);
 
@@ -106,23 +111,12 @@ void
 usegment(void)
 {
   pushcli();
-  c->ts.ss0 = SEG_KDATA << 3;
-  if(cp)
-    c->ts.esp0 = (uint)(cp->kstack + KSTACKSIZE);
-  else
-    c->ts.esp0 = 0xffffffff;
-
-  if(cp){
-    c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
-    c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
-  } else {
-    c->gdt[SEG_UCODE] = SEG_NULL;
-    c->gdt[SEG_UDATA] = SEG_NULL;
-  }
+  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
+  c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
   c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
   c->gdt[SEG_TSS].s = 0;
-
-  lgdt(c->gdt, sizeof(c->gdt));
+  c->ts.ss0 = SEG_KDATA << 3;
+  c->ts.esp0 = (uint)cp->kstack + KSTACKSIZE;
   ltr(SEG_TSS << 3);
   popcli();
 }
@@ -171,14 +165,15 @@ void
 userinit(void)
 {
   struct proc *p;
-  extern uchar _binary_initcode_start[], _binary_initcode_size[];
+  extern char _binary_initcode_start[], _binary_initcode_size[];
 
   p = allocproc();
   initproc = p;
 
   // Initialize memory from initcode.S
   p->sz = PAGE;
   p->mem = kalloc(p->sz);
+  memset(p->mem, 0, p->sz);
   memmove(p->mem, _binary_initcode_start, (int)_binary_initcode_size);
 
   memset(p->tf, 0, sizeof(*p->tf));
@@ -210,7 +205,7 @@ scheduler(void)
   struct proc *p;
 
   for(;;){
-    // Enable interrupts on this processor, in lieu of saving intena.
+    // Enable interrupts on this processor.
     sti();
 
     // Loop over process table looking for process to run.
@@ -225,44 +220,43 @@ scheduler(void)
       cp = p;
       usegment();
       p->state = RUNNING;
-      swtch(&c->context, &p->context);
+      swtch(&c->context, p->context);
 
       // Process is done running for now.
       // It should have changed its p->state before coming back.
       cp = 0;
-      usegment();
     }
     release(&ptable.lock);
 
   }
 }
 
-// Enter scheduler. Must already hold ptable.lock
+// Enter scheduler. Must hold only ptable.lock
 // and have changed cp->state.
 void
 sched(void)
 {
   int intena;
 
-  if(readeflags()&FL_IF)
-    panic("sched interruptible");
-  if(cp->state == RUNNING)
-    panic("sched running");
   if(!holding(&ptable.lock))
     panic("sched ptable.lock");
   if(c->ncli != 1)
     panic("sched locks");
+  if(cp->state == RUNNING)
+    panic("sched running");
+  if(readeflags()&FL_IF)
+    panic("sched interruptible");
 
   intena = c->intena;
-  swtch(&cp->context, &c->context);
+  swtch(&cp->context, c->context);
   c->intena = intena;
 }
 
 // Give up the CPU for one scheduling round.
 void
 yield(void)
 {
-  acquire(&ptable.lock);
+  acquire(&ptable.lock);  //DOC: yieldlock
   cp->state = RUNNABLE;
   sched();
   release(&ptable.lock);
@@ -275,9 +269,8 @@ forkret(void)
 {
   // Still holding ptable.lock from scheduler.
   release(&ptable.lock);
-
-  // Jump into assembly, never to return.
-  forkret1(cp->tf);
+
+  // Return to "caller", actually trapret (see allocproc).
 }
 
 // Atomically release lock and sleep on chan.
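
For orientation, a minimal stand-alone sketch (not xv6 code) of the kernel-stack layout the new allocproc() builds: a trap frame at the top, a planted trapret return address below it, then a context whose eip points at forkret. The types, sizes, and stub functions here are stand-ins; on xv6's 32-bit x86 the return-address slot is the 4 bytes that "sp -= 4" reserves.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KSTACKSIZE 4096                         /* same stack size xv6 uses */

struct trapframe { uint32_t regs[19]; };        /* stand-in for xv6's trap frame */
struct context   { uint32_t edi, esi, ebx, ebp, eip; };  /* callee-saved regs + eip */

static void trapret_stub(void) { }              /* stands in for trapret (trapasm.S) */
static void forkret_stub(void) { }              /* stands in for forkret (proc.c) */

int
main(void)
{
  char *kstack = malloc(KSTACKSIZE);
  char *sp = kstack + KSTACKSIZE;               /* stack grows down from the top */

  sp -= sizeof(struct trapframe);               /* leave room for the trap frame */
  struct trapframe *tf = (struct trapframe*)sp;

  sp -= sizeof(void(*)(void));                  /* return-address slot (4 bytes on xv6) */
  void (*ret)(void) = trapret_stub;             /* forkret's ret lands on trapret */
  memcpy(sp, &ret, sizeof ret);
  char *retslot = sp;

  sp -= sizeof(struct context);                 /* context that swtch will pop */
  struct context *ctx = (struct context*)sp;
  memset(ctx, 0, sizeof *ctx);
  ctx->eip = (uint32_t)(uintptr_t)forkret_stub; /* swtch "returns" into forkret */

  printf("trapframe %p / trapret slot %p / context %p\n",
         (void*)tf, (void*)retslot, (void*)ctx);
  free(kstack);
  return 0;
}

On the real kernel stack, swtch's ret pops context->eip (forkret); forkret's own return pops the planted slot and lands in trapret, whose iret drops into the process's user code.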

swtch.S

+5 -3
@@ -1,5 +1,7 @@
-# void swtch(struct context **old, struct context **new);
-#
+# Context switch
+#
+# void swtch(struct context **old, struct context *new);
+#
 # Save current register context in old
 # and then load register context from new.
 
@@ -16,7 +18,7 @@ swtch:
 
   # Switch stacks
   movl %esp, (%eax)
-  movl (%edx), %esp
+  movl %edx, %esp
 
   # Load new callee-save registers
   popl %edi
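
A hedged sketch of what the new convention implies, not code from this commit: the second argument is now the address of the saved register block itself, so "movl %edx, %esp" makes that block the stack and the following pops restore the callee-saved registers. The field order below is inferred from the pop sequence (edi is popped first); xv6's real struct context lives in proc.h and may differ in detail.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Inferred layout: edi sits at the lowest address (popped first);
 * ret then consumes eip, which allocproc points at forkret for new processes. */
struct context {
  uint32_t edi;
  uint32_t esi;
  uint32_t ebx;
  uint32_t ebp;
  uint32_t eip;
};

int
main(void)
{
  /* Callers now pass the context directly, e.g. swtch(&c->context, p->context)
   * in scheduler() and swtch(&cp->context, c->context) in sched() above. */
  printf("edi %zu  esi %zu  ebx %zu  ebp %zu  eip %zu  (sizeof %zu)\n",
         offsetof(struct context, edi), offsetof(struct context, esi),
         offsetof(struct context, ebx), offsetof(struct context, ebp),
         offsetof(struct context, eip), sizeof(struct context));
  return 0;
}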

trapasm.S

-8
@@ -35,11 +35,3 @@ trapret:
   popl %ds
   addl $0x8, %esp  # trapno and errcode
   iret
-
-# A forked process switches to user mode by calling
-# forkret1(tf), where tf is the trap frame to use.
-.globl forkret1
-forkret1:
-  movl 4(%esp), %esp
-  jmp trapret
-
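
With forkret1 gone, nothing jumps a new process into user mode explicitly; the return addresses that allocproc plants on the kernel stack do it. A toy model of that control flow, with made-up function names (in the kernel these transitions happen through ret and iret, not direct calls):

#include <stdio.h>

/* Toy model of how the first process now reaches user mode:
 * scheduler -> swtch -> forkret -> trapret -> iret into initcode. */

static void user_initcode(void) { printf("user mode: running initcode\n"); }

static void trapret_model(void)
{
  printf("trapret: pop trap frame, iret\n");
  user_initcode();                  /* in the kernel: iret, not a call */
}

static void forkret_model(void)
{
  printf("forkret: release ptable.lock and return\n");
  /* In the kernel, forkret's ret pops the trapret address that allocproc
   * planted on the stack; here we model that with a direct call. */
  trapret_model();
}

int
main(void)
{
  printf("scheduler: swtch to the new process's context\n");
  forkret_model();                  /* in the kernel: swtch's ret, not a call */
  return 0;
}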