[Xenomai] imx6q xenomai ipipe-3.0-imx6q

嵌入式工程师 ericvic at 163.com
Thu Apr 17 09:58:40 CEST 2014


I am using ipipe-3.0-imx6q with xenomai-2.6.3; my board is an i.MX6Q at 800 MHz, and I am modeling my actual application. When I use shared memory between different rt_tasks bound to different CPUs, the latency of a 1000 us periodic task is about 14 us at the beginning, but when I run more tasks it reaches 150 us or higher. My real tasks do more work than the example below. I do not know if something is wrong with my task setup. Also, if I copy a large file (about 30 MB) to /tmp, the latency gets even higher: cp large.tar.bz2 /tmp   (/tmp is a memory-backed directory)
result:
1task: 0.000421 ms                                                              
pmax: 0.014372 ms                                                               
1task: 0.057951 ms                                                              
1max: 0.153273 ms   


The example test code is below. Thank you very much.




















#include <linux/module.h> 
#include <linux/types.h>  
#include <linux/fs.h>  
#include <linux/types.h>  
#include <linux/kernel.h>  
#include <linux/init.h>  
#include <linux/platform_device.h>  
#include <linux/cdev.h>  
#include <linux/ioctl.h>  
#include <linux/gpio.h>  
#include <linux/mm.h>  
#include <linux/errno.h>  
#include <asm/uaccess.h>  
#include <linux/slab.h>  
#include <asm/io.h> 
#include <linux/sched.h>  
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <nucleus/module.h>
#include <linux/string.h> 
#include <native/timer.h>
#include <native/alarm.h>
#include <native/task.h>
#include <native/sem.h>
#include <native/intr.h>
#include <native/heap.h>
#include <native/pipe.h>


#define TASK_PRIO 99 /* Highest RT priority */
#define TASK_MODE 0 /* No flags */
#define TASK_STKSZ 0 /* Stack size (use default one) */
/* Timestamps for the end-of-loop latency measurement in taskOne.
 * NOTE(review): `now` is also written by taskTwo on another CPU — see taskTwo. */
RTIME now, previous;
/* Timestamps for the start-of-loop latency measurement in taskOne. */
RTIME pnow, pprevious;
/* The three RT tasks, each pinned to its own CPU in rtex_init(). */
RT_TASK t1,t2,t3;
/* semt2/semt3 chain the tasks: taskOne signals taskTwo, taskTwo signals
 * taskThree. semt1 is created but never used. */
RT_SEM  semt1,semt2,semt3;
/* Xenomai heap providing the shared buffer below. */
RT_HEAP heap_desc;
/* Shared buffer (1 MB allocated from heap_desc); all three tasks
 * memset/memcpy into it concurrently from different CPUs. */
long * mem_spvm; 
void *block;




MODULE_LICENSE("GPL");




/* Source pattern copied into the shared buffer; all zeros here. */
static unsigned char logo[6957]={0};


/* Accumulated expected elapsed time: taskOne adds one period (1 ms in ns)
 * per cycle so (now - previous - tmp) is the latency of the current cycle. */
long long tmp=0;


/*
 * taskOne - highest-priority 1 ms periodic task (CPU 0).
 *
 * Each period it: signals taskTwo, measures start-of-loop latency (pmax),
 * churns the shared buffer with alternating memset/memcpy passes, then
 * measures end-of-loop latency (max) and waits for the next period.
 * `previous`/`pprevious` are read once; `tmp` grows by one period per
 * cycle so (timestamp - baseline - tmp) is the per-cycle deviation.
 *
 * Fixes vs. original: i/j/max/pmax were read uninitialized (undefined
 * behavior — the printed "latency" could start from garbage); the first
 * printk mixed `previous` into a `pprevious` measurement; declarations
 * now precede the first statement (gnu89 kernel builds reject mixed
 * declarations); the 12 unrolled memset/memcpy pairs are a loop.
 */
void taskOne(void *arg)
{
	long i = 0;          /* cycle counter, used to throttle printk */
	int j = 0;           /* warm-up counter: first 10000 cycles ignored */
	long max = 0;        /* worst observed end-of-loop latency (ns) */
	long pmax = 0;       /* worst observed start-of-loop latency (ns) */
	int k;

	rt_task_set_periodic(NULL, TM_NOW, 1000000); /* 1000 us period */

	previous = rt_timer_read();
	pprevious = rt_timer_read();
	while (1) {
		rt_sem_v(&semt2);

		pnow = rt_timer_read();
		if (pmax < (long)(pnow - pprevious - tmp))
			pmax = (long)(pnow - pprevious - tmp);
		if (j < 10000)
			pmax = 0; /* discard warm-up noise */
		if (i % 20000 == 0) {
			printk("1task: %d.%06d ms\n",
			       (int)(pnow - pprevious - tmp) / 1000000,
			       (int)(pnow - pprevious - tmp) % 1000000);
			printk("pmax: %ld.%06ld ms\n",
			       pmax / 1000000, pmax % 1000000);
		}

		/* Memory churn over the shared buffer: 12 alternating
		 * clear/fill passes, each followed by a copy of `logo`. */
		for (k = 0; k < 12; k++) {
			memset(mem_spvm, (k & 1) ? 255 : 0, sizeof(logo));
			memcpy(mem_spvm, logo, sizeof(logo));
		}

		now = rt_timer_read();
		if (max < (long)(now - previous - tmp))
			max = (long)(now - previous - tmp);
		if (j++ < 10000)
			max = 0; /* discard warm-up noise */
		if (i++ % 20000 == 0) {
			printk("1task: %d.%06d ms\n",
			       (int)(now - previous - tmp) / 1000000,
			       (int)(now - previous - tmp) % 1000000);
			printk("1max: %ld.%06ld ms\n",
			       max / 1000000, max % 1000000);
			printk("spvm6: %d \n", (char)mem_spvm[6]);
			/* NOTE(review): index sizeof(logo) addresses a `long`
			 * at byte offset 6957*sizeof(long) — far past the
			 * region the memsets touch; value is whatever is in
			 * the 1 MB allocation there. Confirm intent. */
			printk("spvm1: %d  ", (char)mem_spvm[sizeof(logo)]);
		}
		tmp = tmp + 1000000; /* advance expected elapsed time by one period */
		rt_task_wait_period(NULL);
	}
}


void taskTwo(void*arg)
{
int i=0;
long max=0;
while(1)
{
rt_sem_p(&semt2,TM_INFINITE);
now = rt_timer_read();
memset(mem_spvm,255,sizeof(logo));
memcpy(mem_spvm,logo,sizeof(logo)); 
if(i++%20000==0){
printk("2spvm6: %d \n",(char)mem_spvm[6]);
printk("2spvm1: %d\n",(char)mem_spvm[sizeof(logo)]);
}
  
rt_sem_v(&semt3);
}
}


/*
 * taskThree - lowest-priority task (CPU 2), released each period by
 * taskTwo via semt3; churns the shared buffer once per release.
 *
 * Fix vs. original: `i` was read uninitialized (undefined behavior), so
 * the printk throttle started from garbage.
 */
void taskThree(void *arg)
{
	int i = 0; /* cycle counter, used to throttle printk */

	while (1) {
		rt_sem_p(&semt3, TM_INFINITE);
		memset(mem_spvm, 255, sizeof(logo));
		memcpy(mem_spvm, logo, sizeof(logo));
		if (i++ % 20000 == 0) {
			printk("3spvm6: %d \n", (char)mem_spvm[6]);
			printk("3spvm1: %d\n", (char)mem_spvm[sizeof(logo)]);
		}
	}
}




#define HEAP_SIZE (15*1024*1024)

#define HEAP_MODE 0  /* Local heap. */


/*
 * rtex_init - module entry point.
 *
 * Creates the shared heap, allocates a 1 MB buffer from it, creates the
 * three semaphores, then creates and starts the three tasks pinned to
 * CPUs 0/1/2 at priorities 99/85/80.
 *
 * Returns 0 on success or a negative Xenomai error code; resources
 * acquired before a failure are released on the error path.
 *
 * Fixes vs. original: it returned the uninitialized `ret` (undefined
 * behavior — the module could randomly fail to load); errors were only
 * printed, never propagated, so tasks were started against a missing
 * heap; failure paths now unwind.
 */
static int __init rtex_init(void)
{
	int err;

	err = rt_heap_create(&heap_desc, "MyHeapName1", HEAP_SIZE, HEAP_MODE);
	if (err) {
		printk("rt_heap_create wrong\n");
		return err;
	}

	err = rt_heap_alloc(&heap_desc, 1024*1024, TM_INFINITE, &block);
	if (err) {
		printk("rt_heap_alloc wrong\n");
		goto fail_heap;
	}

	mem_spvm = (long *)block;
	printk("mem_spvm successfully! \naddr = 0x%lx\n",
	       (unsigned long)mem_spvm);

	rt_sem_create(&semt1, "", 0, S_FIFO);
	rt_sem_create(&semt2, "", 0, S_FIFO);
	rt_sem_create(&semt3, "", 0, S_FIFO);

	err = rt_task_create(&t1, "task1", 4096, 99, T_FPU | T_CPU(0));
	if (err)
		goto fail_sems;
	rt_task_start(&t1, taskOne, NULL);

	err = rt_task_create(&t2, "task2", 4096, 85, T_FPU | T_CPU(1));
	if (err)
		goto fail_t1;
	rt_task_start(&t2, taskTwo, NULL);

	err = rt_task_create(&t3, "task3", 4096, 80, T_FPU | T_CPU(2));
	if (err)
		goto fail_t2;
	rt_task_start(&t3, taskThree, NULL);

	return 0;

fail_t2:
	rt_task_delete(&t2);
fail_t1:
	rt_task_delete(&t1);
fail_sems:
	rt_sem_delete(&semt3);
	rt_sem_delete(&semt2);
	rt_sem_delete(&semt1);
	rt_heap_free(&heap_desc, block);
fail_heap:
	rt_heap_delete(&heap_desc);
	return err;
}


/*
 * rtex_exit - module exit point.
 *
 * Fix vs. original: the heap was freed and deleted BEFORE the tasks that
 * are still memset/memcpy-ing into `mem_spvm` were deleted — a
 * use-after-free window. Tasks are now deleted first, then the
 * semaphores, then the heap.
 */
static void __exit rtex_exit(void)
{
	rt_task_delete(&t1);
	rt_task_delete(&t2);
	rt_task_delete(&t3);
	rt_sem_delete(&semt3);
	rt_sem_delete(&semt2);
	rt_sem_delete(&semt1);
	rt_heap_free(&heap_desc, block);
	rt_heap_delete(&heap_desc);
	printk("rtex_exit \n");
}
module_init(rtex_init);
module_exit(rtex_exit);




More information about the Xenomai mailing list