There are two situations in xv6 in which scheduling occurs. Before looking at them, note that xv6 describes a process with struct proc:
// Per-process state
struct proc {
  struct spinlock lock;

  // p->lock must be held when using these:
  enum procstate state;        // Process state
  struct proc *parent;         // Parent process
  void *chan;                  // If non-zero, sleeping on chan
  int killed;                  // If non-zero, have been killed
  int xstate;                  // Exit status to be returned to parent's wait
  int pid;                     // Process ID

  // these are private to the process, so p->lock need not be held.
  uint64 kstack;               // Virtual address of kernel stack
  uint64 sz;                   // Size of process memory (bytes)
  pagetable_t pagetable;       // User page table
  struct trapframe *trapframe; // data page for trampoline.S
  struct context context;      // swtch() here to run process
  struct file *ofile[NOFILE];  // Open files
  struct inode *cwd;           // Current directory
  char name[16];               // Process name (debugging)

  // lab3 add
  pagetable_t ptb_k;           // kernel page table
};
enum procstate { UNUSED, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
A process is in one of five states: UNUSED, SLEEPING, RUNNABLE, RUNNING, or ZOMBIE.
struct proc contains a field struct context context, which describes the process's kernel context:
// Saved registers for kernel context switches.
struct context {
  uint64 ra;   // return address register
  uint64 sp;   // kernel stack pointer

  // callee-saved registers
  uint64 s0;
  uint64 s1;
  uint64 s2;
  uint64 s3;
  uint64 s4;
  uint64 s5;
  uint64 s6;
  uint64 s7;
  uint64 s8;
  uint64 s9;
  uint64 s10;
  uint64 s11;
};
RISC-V registers

Caller-saved registers are handled by the C compiler automatically: at a call site it saves the ones still in use onto the stack, and restores them after the call returns. Callee-saved registers are the callee's responsibility: a function must save any of them that it will use at its entry and restore them before returning.

The context struct stores s0-s11, all of which are callee-saved registers. Caller-saved registers need no slot here: since swtch is invoked as an ordinary C function, the compiler has already spilled any live caller-saved registers at the call site. Besides s0-s11, the context also stores ra and sp: ra holds the return address, so that execution resumes at the correct instruction when the process is switched back in, and sp holds the kernel stack pointer, so that the process's kernel stack is restored correctly after rescheduling.

process context = callee-saved registers + ra + sp
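For reference, this is how allocproc in kernel/proc.c initializes the context of a newly allocated process: ra is pointed at forkret and sp at the top of the new kernel stack, so the first swtch into this process "returns" into forkret on its own stack (excerpt; the surrounding allocation code is omitted):

  // Set up new context to start executing at forkret,
  // which returns to user space.
  memset(&p->context, 0, sizeof(p->context));
  p->context.ra = (uint64)forkret;
  p->context.sp = p->kstack + PGSIZE;

With the context structure in hand, here is the first of the two scheduling entry points, yield: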
void
yield(void)
{
  struct proc *p = myproc();
  acquire(&p->lock); // protect p->state so no other CPU schedules this process
                     // before it enters sched; the lock is later released in scheduler
  p->state = RUNNABLE;
  sched();           // context switch into the scheduler
  release(&p->lock); // release the lock that scheduler acquired
}
yield first acquires the process's lock to protect its state, preventing another CPU from scheduling this process before it has entered sched. It then sets the state to RUNNABLE and calls sched.

When a timer interrupt arrives, devintr classifies the interrupt; a return value of 2 means a timer interrupt (note that this dispatch happens inside the kernel's trap handling). usertrap (for traps from user space) or kerneltrap (for traps taken in the kernel) then calls yield to give up the current CPU:
// in usertrap
// give up the CPU if this is a timer interrupt.
if(which_dev == 2)
  yield();

// in kerneltrap
// give up the CPU if this is a timer interrupt.
// note the myproc() != 0 check: when the CPU is running the scheduler there is
// no current process, and yielding there could deadlock.
if(which_dev == 2 && myproc() != 0 && myproc()->state == RUNNING)
  yield();
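For completeness, this is the timer branch of devintr in kernel/trap.c in the xv6-riscv source: the machine-mode timer handler timervec forwards the interrupt as a supervisor software interrupt, and devintr acknowledges it and returns 2:

  } else if(scause == 0x8000000000000001L){
    // software interrupt from a machine-mode timer interrupt,
    // forwarded by timervec in kernelvec.S.
    if(cpuid() == 0){
      clockintr();
    }
    // acknowledge the software interrupt by clearing
    // the SSIP bit in sip.
    w_sip(r_sip() & ~2);
    return 2;
  }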
The other place scheduling happens is in the sleep-lock machinery, namely the sleep(void *chan, struct spinlock *lk) function, which blog3 already covered. It follows the same acquire-p->lock / set-state / sched pattern as yield, as excerpted below.
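For reference, sleep as it appears in kernel/proc.c (the hand-off between lk and p->lock is the part discussed in blog3):

void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();

  // Must acquire p->lock in order to
  // change p->state and then call sched.
  // Once we hold p->lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup locks p->lock),
  // so it's okay to release lk.
  if(lk != &p->lock){
    acquire(&p->lock);
    release(lk);
  }

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &p->lock){
    release(&p->lock);
    acquire(lk);
  }
}

Both paths, yield and sleep, end up in sched: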
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&p->lock))
    panic("sched p->lock");
  if(mycpu()->noff != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(intr_get())
    panic("sched interruptible");

  intena = mycpu()->intena;
  swtch(&p->context, &mycpu()->context); // switch into the scheduler context
  // the next time this process is scheduled, execution resumes here;
  // restore the saved interrupt-enable state.
  mycpu()->intena = intena;
}
sched first performs sanity checks: the caller must hold p->lock; the CPU's spinlock nesting depth noff must be exactly 1 (p->lock must be the only lock held, since carrying another lock across the switch could deadlock); the process's state must already have been changed away from RUNNING; and interrupts must be off. It then records the CPU's original interrupt-enable state in intena and calls swtch to switch contexts, entering the scheduler.
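sched reads noff, intena, and the scheduler's context through mycpu(); for reference, this per-CPU state is struct cpu in kernel/proc.h:

// Per-CPU state.
struct cpu {
  struct proc *proc;          // The process running on this cpu, or null.
  struct context context;     // swtch() here to enter scheduler().
  int noff;                   // Depth of push_off() nesting.
  int intena;                 // Were interrupts enabled before push_off()?
};

The swtch routine that sched and scheduler both call is a short piece of assembly: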
# Context switch
#
#   void swtch(struct context *old, struct context *new);
#
# Save current registers in old. Load from new.

.globl swtch
swtch:
        sd ra, 0(a0)
        sd sp, 8(a0)
        sd s0, 16(a0)
        sd s1, 24(a0)
        sd s2, 32(a0)
        sd s3, 40(a0)
        sd s4, 48(a0)
        sd s5, 56(a0)
        sd s6, 64(a0)
        sd s7, 72(a0)
        sd s8, 80(a0)
        sd s9, 88(a0)
        sd s10, 96(a0)
        sd s11, 104(a0)

        ld ra, 0(a1)
        ld sp, 8(a1)
        ld s0, 16(a1)
        ld s1, 24(a1)
        ld s2, 32(a1)
        ld s3, 40(a1)
        ld s4, 48(a1)
        ld s5, 56(a1)
        ld s6, 64(a1)
        ld s7, 72(a1)
        ld s8, 80(a1)
        ld s9, 88(a1)
        ld s10, 96(a1)
        ld s11, 104(a1)

        ret
Prototype: void swtch(struct context *old, struct context *new);

swtch lives in swtch.S. It saves the current registers into the context pointed to by old and loads the registers from the context pointed to by new. Note that it never saves the program counter: it saves ra, the address swtch was called from, and its final ret jumps to the ra loaded from new, so execution resumes wherever the new context last called swtch.
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run.
//  - swtch to start running that process.
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();

  c->proc = 0;
  for(;;){
    // Avoid deadlock by ensuring that devices can interrupt.
    intr_on();

    int found = 0;
    for(p = proc; p < &proc[NPROC]; p++) {
      acquire(&p->lock); // released by the process itself, e.g. at the end of yield
      if(p->state == RUNNABLE) {
        // Switch to chosen process.  It is the process's job
        // to release its lock and then reacquire it
        // before jumping back to us.
        p->state = RUNNING;
        c->proc = p;

        // lab3: switch to the process's copy of the kernel page table
        if(p->ptb_k) {
          w_satp(MAKE_SATP(p->ptb_k));
          sfence_vma();
        }

        swtch(&c->context, &p->context); // context switch: scheduler -> chosen process
        // when that process later calls yield (timer interrupt) or sleep,
        // sched calls swtch and the scheduler resumes here
        kvminithart(); // promptly restore the global kernel page table

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        c->proc = 0;
        found = 1;
      }
      release(&p->lock); // release p->lock; the process reacquired it in yield before switching back
    }
#if !defined (LAB_FS)
    if(found == 0) {
      // kvminithart(); // restore the kernel page table
      intr_on();
      asm volatile("wfi");
    }
#else
    ;
#endif
  }
}
scheduler loops over the global proc array looking for a process whose state is RUNNABLE and switching to it; when that process gives the CPU back, the inner loop simply continues from the next slot.

(The figure here is borrowed from 6.S081——CPU调度部分(CPU的复用和调度)——xv6源码完全解析系列(10)_分析总结xv6中进程调度算法的实现-CSDN博客.)

In other words, a scheduling operation consists of two switches: first from the user process into the kernel scheduler context, which scans the process table for the next runnable process, and then from the scheduler into that process.
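One subtlety in this two-step dance is a brand-new process: allocproc set its context.ra to forkret, so the scheduler's first swtch into it "returns" into forkret, which releases the p->lock that scheduler acquired and then heads to user space. From kernel/proc.c:

// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
  static int first = 1;

  // Still holding p->lock from scheduler.
  release(&myproc()->lock);

  if (first) {
    // File system initialization must be run in the context of a
    // regular process (e.g., because it calls sleep), and thus cannot
    // be run from main().
    first = 0;
    fsinit(ROOTDEV);
  }

  usertrapret();
}

Finally, where does each CPU's scheduler get started? In kernel/main.c: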
// start() jumps here in supervisor mode on all CPUs.
void
main()
{
  if(cpuid() == 0){
    consoleinit();
#if defined(LAB_PGTBL) || defined(LAB_LOCK)
    statsinit();
#endif
    printfinit();
    printf("\n");
    printf("xv6 kernel is booting\n");
    printf("\n");
    kinit();         // physical page allocator
    kvminit();       // create kernel page table
    kvminithart();   // turn on paging
    procinit();      // process table
    trapinit();      // trap vectors
    trapinithart();  // install kernel trap vector
    plicinit();      // set up interrupt controller
    plicinithart();  // ask PLIC for device interrupts
    binit();         // buffer cache
    iinit();         // inode cache
    fileinit();      // file table
    virtio_disk_init(); // emulated hard disk
#ifdef LAB_NET
    pci_init();
    sockinit();
#endif
    userinit();      // first user process
    __sync_synchronize();
    started = 1;
  } else {
    while(started == 0)
      ;
    __sync_synchronize();
    printf("hart %d starting\n", cpuid());
    kvminithart();    // turn on paging
    trapinithart();   // install kernel trap vector
    plicinithart();   // ask PLIC for device interrupts
  }

  // from here every CPU enters scheduler(); a timer interrupt taken inside
  // scheduler does not make it give up the CPU (there is no current process
  // there), so it just keeps scanning for a runnable process
  scheduler();
}
In kernel/main.c, every CPU eventually reaches scheduler, entering an infinite loop that keeps scheduling processes.
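As context for the userinit() call in main above, this is how the first user process is created in kernel/proc.c of the 2020 lab tree (newer xv6 versions rename uvminit to uvmfirst): it allocates a proc, copies the hand-assembled initcode into one user page, and marks the process RUNNABLE, so whichever CPU's scheduler finds it first will run it.

// Set up first user process.
void
userinit(void)
{
  struct proc *p;

  p = allocproc();
  initproc = p;

  // allocate one user page and copy init's instructions
  // and data into it.
  uvminit(p->pagetable, initcode, sizeof(initcode));
  p->sz = PGSIZE;

  // prepare for the very first "return" from kernel to user.
  p->trapframe->epc = 0;      // user program counter
  p->trapframe->sp = PGSIZE;  // user stack pointer

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;

  release(&p->lock);
}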