/*
 * RT-Linux default scheduler
 * RTLinux has a modular scheduler and this may be replaced if desired.
 *
 * Written by Michael Barabanov, Victor Yodaiken
 * Copyright (C) VJY Associates LLC, 1998,1999
 * Released under the terms of the GNU General Public License
 */

/* NOTE: the original header names were lost in transcription; the list
 * below is a best-effort reconstruction of the interfaces this file uses. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tqueue.h>
#include <linux/malloc.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <rtl_conf.h>
#include <rtl_core.h>
#include <rtl_sync.h>
#include <rtl_time.h>
#include <rtl_printf.h>
#include <rtl_sched.h>
#include <rtl_tqueue.h>

static int rtl_sched_irq;
DECLARE_TASK_QUEUE(rtl_tq_linux);

#ifdef __SMP__
struct rtl_sched_cpu_struct rtl_sched [NR_CPUS];
#else
struct rtl_sched_cpu_struct rtl_sched [1];
#endif

/* # define rtl_printf(fac, args...) do ; while (0) */

int rtl_startup(void *(*fn)(void *), void *data)
{
	void *retval;
	rtl_allow_interrupts();
	retval = (*fn)(data);
	pthread_exit(retval);
	/* will never reach this line */
	return 0;
}

void rtl_tq_sync (struct tq_struct *tq_s)
{
	int cnt = 1000000;
	while (test_bit(0, &tq_s->sync)) {
		barrier();
		cnt--;
		if (cnt < 0) {
			rtl_printf("timed out waiting for tq\n");
			break;
		}
	}
}

int rtl_setclockmode (clockid_t clock, int mode, hrtime_t param)
{
	int ret;
	rtl_irqstate_t flags;

	rtl_no_interrupts (flags);
	ret = clock->settimermode (clock, mode);
	if (ret != 0) {
		rtl_restore_interrupts (flags);
		return ret;
	}
	if (mode == RTL_CLOCK_MODE_ONESHOT) {
		param = HRTIME_INFINITY;
	}
	ret = clock->settimer (clock, param);
	rtl_restore_interrupts (flags);
	return ret;
}

void pthread_exit(void *retval)
{
	rtl_irqstate_t flags;
	pthread_self()->retval = retval;
	/* if (pthread_self()->joining_thread) {
		pthread_self()->joining_thread->commands |= WAKEME;
		delete thread resources();
	   } else {
		wait until somebody joins on this thread
	   } */
	rtl_no_interrupts(flags);
	pthread_self()->state = RTL_THREAD_ZOMBIE;
	rtl_schedule();
	/* will never reach this line */
	rtl_restore_interrupts(flags);
	rtl_printf("pthread_exit() returned!\n");
}

static void rtl_task_free_memory(void *p)
{
	RTL_THREAD_STRUCT *task = (RTL_THREAD_STRUCT *) p;
	kfree (task->stack_bottom);
	kfree (task);
}

static void add_to_task_list(pthread_t thread)
{
#ifdef __SMP__
	DECLARE_CPUID(cpu_id);
	if (thread->cpu != cpu_id) {
		/* defer the insertion to the target CPU via its task queue */
		struct tq_struct tq;
		tq.next = 0;
		tq.sync = 0;
		tq.data = thread;
		tq.routine = (void (*)(void *)) add_to_task_list;
		mb();
		rtl_queue_task (&tq, &RTL_TQ(thread->cpu));
		mb();
		rtl_reschedule (thread->cpu);
		rtl_tq_sync (&tq);
		return;
	}
#endif
	{
		schedule_t *s = LOCAL_SCHED;
		thread->next = s->rtl_tasks;
		s->rtl_tasks = thread;
	}
}
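/*
 * Example (illustrative sketch, not part of the scheduler): switching the
 * local scheduler clock to one-shot mode with rtl_setclockmode().  In
 * one-shot mode the interval argument is ignored (it is forced to
 * HRTIME_INFINITY above), so 0 is passed here.
 */
#if 0
static int example_set_oneshot (void)
{
	schedule_t *s = LOCAL_SCHED;
	return rtl_setclockmode (s->clock, RTL_CLOCK_MODE_ONESHOT, 0);
}
#endif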
static int remove_from_this_cpu(pthread_t thread)
{
	int found = 0;
	RTL_THREAD_STRUCT *t;
	schedule_t *s;

	s = LOCAL_SCHED;
#ifdef __SMP__
	{
		DECLARE_CPUID(cpu_id);
		if (thread->cpu != cpu_id) {
			printk("RTL ERROR: remove_from_this_cpu crosses CPUs\n");
			return ESRCH;
		}
	}
#endif
	if (thread != s->rtl_tasks) {
		for (t = s->rtl_tasks; t; t = t->next) {
			if (t->next == thread) {
				t->next = thread->next;
				found = 1;
				break;
			}
		}
		if (!found) {
			return ESRCH;
		}
	} else {
		s->rtl_tasks = thread->next;
	}
	if (s->rtl_task_fpu_owner == thread) {
		s->rtl_task_fpu_owner = 0;
	}
	return 0;
}

#ifndef CONFIG_RTL_USE_V1_API
int pthread_make_periodic_np (pthread_t p, hrtime_t start_time, hrtime_t period)
{
	rtl_irqstate_t interrupt_state;
#ifdef __SMP__
	{
		DECLARE_CPUID(cpu_id);
		if (p->cpu != cpu_id) {
			printk("RTL ERROR: pthread_make_periodic_np crosses CPUs\n");
			return ESRCH;
		}
	}
#endif
	rtl_no_interrupts(interrupt_state);
	p->resume_time = start_time;
	p->period = period;
	rtl_schedule();
	rtl_restore_interrupts(interrupt_state);
	return 0;
}

int pthread_setperiod_np(pthread_t p, const struct itimerspec *value)
{
#ifdef __SMP__
	{
		DECLARE_CPUID(cpu_id);
		if (p->cpu != cpu_id) {
			printk("RTL ERROR: pthread_setperiod_np crosses CPUs\n");
			return ESRCH;
		}
	}
#endif
	if (!timespec_nz(&value->it_value)) {
		pthread_make_periodic_np (p, HRTIME_INFINITY, 0);
	} else {
		pthread_make_periodic_np (p, timespec_to_ns (&value->it_value),
				timespec_to_ns (&value->it_interval));
	}
	return 0;
}
#endif

/* TODO */
int timer_gettime(timer_t timerid, struct itimerspec *value)
{
	return EINVAL;
}

/* TODO */
int timer_getoverrun(timer_t timerid)
{
	return EINVAL;
}
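/*
 * Example (illustrative sketch): the typical shape of a periodic thread
 * under this scheduler.  pthread_make_periodic_np() arms the first resume
 * time and the period (both in nanoseconds); pthread_wait_np() (defined
 * further below) then blocks until the next period.  The current time is
 * read the same way rtl_schedule() reads it.
 */
#if 0
static void *example_periodic_thread (void *arg)
{
	schedule_t *s = LOCAL_SCHED;
	hrtime_t now = s->clock->gethrtime(s->clock);

	/* first resume 1 ms from now, then every 1 ms */
	pthread_make_periodic_np (pthread_self(), now + 1000000, 1000000);
	while (1) {
		pthread_wait_np ();
		/* periodic real-time work goes here */
	}
	return 0;
}
#endif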
inline static RTL_THREAD_STRUCT *find_preemptor(schedule_t *s, RTL_THREAD_STRUCT *chosen)
{
	RTL_THREAD_STRUCT *t;
	RTL_THREAD_STRUCT *preemptor = 0;

	for (t = s->rtl_tasks; t; t = t->next) {
		if (t->state == RTL_THREAD_DELAYED) {
			if (t->sched_param.sched_priority > chosen->sched_param.sched_priority) {
				if (!preemptor || (t->resume_time < preemptor->resume_time)) {
					preemptor = t;
				}
			}
		}
	}
	return preemptor;
}

void rtl_schedule (void)
{
	schedule_t *sched;
	RTL_THREAD_STRUCT *t;
	RTL_THREAD_STRUCT *new_task;
	RTL_THREAD_STRUCT *preemptor = 0;
	unsigned long interrupt_state;
	hrtime_t now;

	sched = LOCAL_SCHED;
	new_task = &sched->rtl_linux_task;

	rtl_no_interrupts(interrupt_state);
	now = sched->clock->gethrtime(sched->clock);

	for (t = sched->rtl_tasks; t; t = t->next) {
		/* handle signals */
		if (test_and_clear_bit(RTL_SIGNAL_SUSPEND, &t->pending_signals)) {
			t->state = RTL_THREAD_SUSPENDED;
		}

		/* expire timers */
		if (t->state == RTL_THREAD_DELAYED) {
			if (now >= t->resume_time) {
				t->state = RTL_THREAD_READY;
				if (t->period != 0) { /* periodic */
					t->resume_time += t->period;
					/* timer overrun */
					ifdebug(OVERRUN) {
						while (now >= t->resume_time) {
							rtl_printf("Overrun: %d %d\n",
								now - t->resume_time,
								now - t->resume_time);
							t->resume_time += t->period;
						}
					}
				} else {
					t->resume_time = HRTIME_INFINITY;
				}
			}
		}

		/* and find highest priority runnable task */
		if (t->state == RTL_THREAD_READY &&
		    (t->sched_param.sched_priority > new_task->sched_param.sched_priority)) {
			new_task = t;
		}
	}

	if (sched->clock->mode == RTL_CLOCK_MODE_ONESHOT &&
	    (preemptor = find_preemptor(sched, new_task))) {
		/* if (!sched->clock->arch.istimerset ||
		       preemptor->resume_time != sched->timerset) { */
		sched->timerset = preemptor->resume_time;
		(sched->clock)->settimer(sched->clock, preemptor->resume_time - now);
		/* } */
	}

	if (new_task != sched->rtl_current) {
		/* switch out old, switch in new */
		if (new_task == &sched->rtl_linux_task) {
			rtl_make_rt_system_idle();
		} else {
			rtl_make_rt_system_active();
		}
		rtl_switch_to(&sched->rtl_current, new_task);

		/* delay switching the FPU context until it is really needed */
#ifdef CONFIG_RTL_FP_SUPPORT
		if (sched->rtl_current->uses_fp &&
		    sched->rtl_task_fpu_owner != sched->rtl_current) {
			if (sched->rtl_task_fpu_owner) {
				rtl_fpu_save (sched, sched->rtl_task_fpu_owner);
			}
			rtl_fpu_restore (sched, sched->rtl_current);
			sched->rtl_task_fpu_owner = sched->rtl_current;
		}
#endif /* CONFIG_RTL_FP_SUPPORT */
	}
	rtl_restore_interrupts(interrupt_state);
}

#ifdef __SMP__
static int cpu_exists (int cpu)
{
	int n;
	int i;

	for (i = 0; i < rtl_num_cpus(); i++) {
		n = cpu_logical_map (i);
		if (n == cpu) {
			return 1;
		}
	}
	return 0;
}
#endif

int pthread_suspend_np (pthread_t thread)
{
	set_bit (RTL_SIGNAL_SUSPEND, &thread->pending_signals);
/*
#ifdef __SMP__
	if (thread->cpu == rtl_getcpuid())
#endif
		rtl_schedule();
*/
	return 0;
}

/* could be simplified like pthread_suspend_np: set a bit and check it in
 * rtl_schedule() for both cross- and same-CPU operations */
int pthread_wakeup_np (pthread_t thread)
{
	long interrupt_state;
#ifdef __SMP__
	DECLARE_CPUID(cpu_id);
	if (thread->cpu != cpu_id) {
		set_bit (RTL_SIGNAL_WAKEUP, &thread->pending_signals);
		rtl_reschedule(thread->cpu);
		return 0;
	}
#endif
	rtl_no_interrupts(interrupt_state);
	thread->state = RTL_THREAD_READY;
	rtl_schedule();
	rtl_restore_interrupts(interrupt_state);
	return 0;
}

int pthread_attr_setcpu_np(pthread_attr_t *attr, int cpu)
{
#ifdef __SMP__
	if (!cpu_exists(cpu)) {
		return EINVAL;
	}
#endif
	attr->cpu = cpu;
	return 0;
}

int pthread_create (pthread_t *thread, pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	int *st;
	long interrupt_state;
	RTL_THREAD_STRUCT *task;
	pthread_attr_t default_attr;
	int stack_size;

	if (!attr) {
		pthread_attr_init(&default_attr);
		attr = &default_attr;
	}
#ifdef __SMP__
	if (!cpu_exists(attr->cpu)) {
		return EAGAIN;
	}
#endif
	stack_size = attr->stack_size;
	task = (RTL_THREAD_STRUCT *) kmalloc (sizeof(RTL_THREAD_STRUCT), GFP_KERNEL);
	if (!task) {
		return EAGAIN;
	}
	st = (int *) kmalloc(stack_size, GFP_KERNEL);
	if (!st) {
		kfree (task);
		return EAGAIN;
	}
	*thread = task;

	task->pending_signals = 0;
	task->retval = 0;
	task->free_task.next = 0;
	task->free_task.sync = 0;
	task->free_task.data = task;
	task->free_task.routine = rtl_task_free_memory;
	task->cpu = attr->cpu;
	task->resume_time = HRTIME_INFINITY;
	task->period = 0;
	task->sched_param = attr->sched_param;
	task->state = RTL_THREAD_READY;
	task->stack_bottom = st;
	task->stack = st + stack_size / sizeof(int);
	task->uses_fp = 0;

	x86_init_stack(task, start_routine, arg, rtl_startup);

	rtl_no_interrupts(interrupt_state);
	mb();
	add_to_task_list(task);
	mb();
	rtl_schedule();
	rtl_restore_interrupts(interrupt_state);
	return 0;
}

#ifdef CONFIG_RTL_FP_SUPPORT
int pthread_setfp_np (pthread_t thread, int flag)
{
	DECLARE_CPUID(cpu_id);
	schedule_t *sched = LOCAL_SCHED;
	rtl_irqstate_t flags;

	if (cpu_id != thread->cpu) {
		rtl_printf("pthread_setfp_np() called on a wrong CPU!\n");
		return EINVAL;
	}
	rtl_no_interrupts(flags);
	if (thread->uses_fp != flag) {
		thread->uses_fp = flag;
		if (flag) {
			rtl_task_init_fpu (thread, sched->rtl_task_fpu_owner);
		} else {
			if (sched->rtl_task_fpu_owner == thread) {
				sched->rtl_task_fpu_owner = 0;
			}
		}
	}
	rtl_restore_interrupts(flags);
	return 0;
}
#endif
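/*
 * Example (illustrative sketch): creating a real-time thread bound to a
 * CPU and enabling FPU use for it.  In this implementation pthread_attr_t
 * carries cpu and sched_param directly, so the priority is set through
 * the attribute structure.  Note that pthread_setfp_np() must be called
 * on the thread's own CPU (see the check above), so this is assumed to
 * run on CPU 0.
 */
#if 0
static pthread_t example_thread;

static int example_spawn (void *(*fn)(void *))
{
	pthread_attr_t attr;
	int ret;

	pthread_attr_init (&attr);
	pthread_attr_setcpu_np (&attr, 0);	/* run on CPU 0 */
	attr.sched_param.sched_priority = 1;
	ret = pthread_create (&example_thread, &attr, fn, 0);
	if (ret != 0) {
		return ret;
	}
#ifdef CONFIG_RTL_FP_SUPPORT
	ret = pthread_setfp_np (example_thread, 1);	/* allow FPU use */
#endif
	return ret;
}
#endif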
static void rtl_sched_timer_interrupt(struct pt_regs *regs)
{
	rtl_schedule ();
}

int pthread_delete_np (pthread_t thread)
{
	RTL_THREAD_STRUCT *task = thread;
	int ret;
	long interrupt_state;
#ifdef __SMP__
	DECLARE_CPUID(cpu_id);
	if (cpu_id == task->cpu) {
#endif
		rtl_no_interrupts (interrupt_state);
		ret = remove_from_this_cpu (thread);
		if (ret != 0) {
			rtl_restore_interrupts (interrupt_state);
			return ret;
		}
		task->state = RTL_THREAD_ZOMBIE;
		rtl_queue_task (&task->free_task, &rtl_tq_linux);
		rtl_global_pend_irq (rtl_sched_irq);
		rtl_restore_interrupts (interrupt_state);
#ifdef __SMP__
	} else { /* the task is on the other CPU */
		int cnt = 0;
		rtl_no_interrupts (interrupt_state);
		set_bit (RTL_SIGNAL_DELETE, &task->pending_signals);
		rtl_reschedule (task->cpu);
		rtl_restore_interrupts (interrupt_state);
		while (test_bit (RTL_SIGNAL_DELETE, &task->pending_signals)) {
			cnt++;
			if (cnt > 10000000) {
				rtl_printf ("RTL: safety count exceeded while waiting for reschedule\n");
				break;
			}
		}
	}
#endif
	rtl_schedule();
	return 0;
}

#ifdef RTL_DEBUG
void rtl_dump_tasks(void)
{
	schedule_t *sched = LOCAL_SCHED;
	RTL_THREAD_STRUCT *t;
	hrtime_t ts = sched->clock->gethrtime(sched->clock);

	rtl_printf("Tasks on CPU %d time = (%9d)\n", rtl_getcpuid(), ts);
	for (t = sched->rtl_tasks; t; t = t->next) {
		if (t == &sched->rtl_linux_task) {
			rtl_printf("linux task ");
		}
		if (t == &sched->idle_task) {
			rtl_printf("idle task ");
		}
		rtl_printf("addr=%08x state=%04x i=(%9d) p=(%9d)\n",
				t, t->state, t->resume_time, t->period);
	}
}
#endif

int pthread_wait_np(void)
{
	long interrupt_state;
	schedule_t *s = LOCAL_SCHED;

	rtl_no_interrupts(interrupt_state);
	s->rtl_current->state = RTL_THREAD_DELAYED;
	rtl_schedule ();
	/* "signal is delivered"; here we could get the overrun count? */
	rtl_restore_interrupts(interrupt_state);
	return 0;
}

#ifdef __SMP__
unsigned int resched_irq(struct pt_regs *r)
{
	DECLARE_CPUID (cpu_id);
	RTL_THREAD_STRUCT *t;

	for (t = sched_data(cpu_id)->rtl_tasks; t; t = t->next) {
		if (test_bit(RTL_SIGNAL_DELETE, &t->pending_signals)) {
			pthread_delete_np (t);
			/* MB: this is not a very good design -- if we call
			   rtl_schedule() from rtl_pthread_delete(), we may
			   deadlock */
			clear_bit(RTL_SIGNAL_DELETE, &t->pending_signals);
		}
		if (test_bit(RTL_SIGNAL_WAKEUP, &t->pending_signals)) {
			t->state = RTL_THREAD_READY;
			clear_bit(RTL_SIGNAL_WAKEUP, &t->pending_signals);
		}
	}
	rtl_run_task_queue (&sched_data(cpu_id)->rtl_tq_cpu);
	rtl_schedule();
	return 0;
}
#endif

static void sched_irq_handler (int irq, void *dev_id, struct pt_regs *p)
{
	rtl_move_to_linux_tq (&rtl_tq_linux, &tq_scheduler);
}
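/*
 * Example (illustrative sketch): pairing pthread_suspend_np() with
 * pthread_wakeup_np().  Suspension is recorded as a pending signal and
 * takes effect at the next pass through rtl_schedule(); wakeup either
 * marks the thread ready directly or, cross-CPU, posts RTL_SIGNAL_WAKEUP
 * and kicks the target CPU.
 */
#if 0
static void example_pause_and_resume (pthread_t thread)
{
	pthread_suspend_np (thread);	/* stops at the next reschedule */
	/* ... later, from any CPU ... */
	pthread_wakeup_np (thread);	/* becomes RTL_THREAD_READY again */
}
#endif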
int init_module(void)
{
	rtl_irqstate_t interrupt_state;
	int i;
	int irq, my_cpu_id;
	schedule_t *s;
	DECLARE_CPUID(cpu_id);

	rtl_tq_linux = NULL;
	irq = rtl_get_soft_irq (sched_irq_handler, "RTL-scheduler");
	if (irq > 0) {
		rtl_sched_irq = irq;
	} else {
		printk ("Can't get an irq for the RTL scheduler\n");
		return -EINVAL;
	}

	rtl_no_interrupts(interrupt_state);
	my_cpu_id = cpu_id;
	for (i = 0; i < rtl_num_cpus(); i++) {
		cpu_id = cpu_logical_map (i);
		s = &rtl_sched [cpu_id];
		s->rtl_current = &s->rtl_linux_task;
		s->rtl_tasks = &s->rtl_linux_task;
		s->rtl_linux_task.state = RTL_THREAD_READY;
		s->rtl_linux_task.sched_param.sched_priority = -1;
		s->rtl_linux_task.next = 0 /* &s->idle_task */;
		s->rtl_linux_task.uses_fp = 1;
		s->idle_task.state = RTL_THREAD_READY;
		s->idle_task.sched_param.sched_priority = -2;
		s->idle_task.uses_fp = 0;
		s->idle_task.next = 0;
		s->rtl_task_fpu_owner = &s->rtl_linux_task;
		s->timerset = 0;
#ifdef __SMP__
		s->rtl_tq_cpu = NULL;
#endif
		s->clock = rtl_getbestclock (cpu_id);
		if (s->clock && rtl_setclockhandler (s->clock, rtl_sched_timer_interrupt) == 0) {
			s->clock->init(s->clock);
		} else {
			printk("Can't get a clock for processor %d\n", cpu_id);
			/* don't return with interrupts disabled */
			rtl_restore_interrupts (interrupt_state);
			return -EINVAL;
		}
	}
	cpu_id = my_cpu_id;

#ifdef __SMP__
	for (i = 0; i < rtl_num_cpus(); i++) {
		int cpu;
		int ret;
		cpu = cpu_logical_map (i);
		s = &rtl_sched [cpu];
		ret = request_ipi(resched_irq, cpu);
	}
#endif
	rtl_restore_interrupts (interrupt_state);
	/* rtl_setdebug (RTLDBG_ALL); */
	return 0;
}

void cleanup_module(void)
{
	int i;
	int cpu;
	schedule_t *s;

	rtl_free_soft_irq(rtl_sched_irq);
	for (i = 0; i < rtl_num_cpus(); i++) {
		cpu = cpu_logical_map (i);
		s = &rtl_sched [cpu];
		s->clock->uninit(s->clock);
#ifdef __SMP__
		free_ipi(cpu);
#endif
	}
}
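/*
 * Example (illustrative sketch): a minimal client module of this
 * scheduler.  Passing a null attribute pointer to pthread_create() makes
 * it fall back to pthread_attr_init() defaults; pthread_delete_np()
 * removes the thread and queues its memory for release in Linux context
 * via rtl_tq_linux.
 */
#if 0
static pthread_t thread;

static void *thread_code(void *arg)
{
	schedule_t *s = LOCAL_SCHED;
	hrtime_t now = s->clock->gethrtime(s->clock);

	pthread_make_periodic_np (pthread_self(), now + 500000, 500000);
	while (1) {
		pthread_wait_np ();
		/* 0.5 ms periodic work goes here */
	}
	return 0;
}

int init_module(void)
{
	return pthread_create (&thread, 0, thread_code, 0);
}

void cleanup_module(void)
{
	pthread_delete_np (thread);
}
#endif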