Xen事件通道实现通讯设计及说明
来源:互联网 发布:mac更改文件夹图标 编辑:程序博客网 时间:2024/05/20 01:38
在前面四篇文章介绍过Xen的事件通道机制后,设计一个简单的通讯样例,并进行原理说明
1、建立dom时完成事件通道的建立与初始化(vcpu绑定)
1.1 事件通道的初始化
domain_create中调用 evtchn_init完成事件通道初始化,evtchn_init中关键函数为get_free_port。在第一次初始化过程中,get_free_port()通过memset()将分配的结构体数组全部清零,其中包括结构体evtchn成员state的值。成员state值为0,意味着初始化后事件通道都处于未分配状态(ECS_FREE),并且notify_vcpu_id也为0,即所有的事件通道默认都和dom中vcpu 0进行绑定。
1.2 对xen的修改
为了实现事件通道的预分配,在创建domain时会对domain的事件通道初始化,函数为evtchn_init,对该函数进行修改如下:
int evtchn_init(struct domain *d)
{
//struct evtchn *lchn,*rchn;
//int i,j;
spin_lock_init(&d->event_lock);
if( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
if(d->domain_id==0)
{
evtchn_from_port(d, 121)->state = ECS_UNBOUND;
evtchn_from_port(d, 121)->u.unbound.remote_domid =1;
evtchn_from_port(d, 122)->state = ECS_UNBOUND;
evtchn_from_port(d, 122)->u.unbound.remote_domid =2;
evtchn_from_port(d, 123)->state = ECS_UNBOUND;
evtchn_from_port(d, 123)->u.unbound.remote_domid =3;
evtchn_from_port(d, 124)->state = ECS_UNBOUND;
evtchn_from_port(d, 124)->u.unbound.remote_domid =4;
evtchn_from_port(d, 125)->state = ECS_UNBOUND;
evtchn_from_port(d, 125)->u.unbound.remote_domid =5;
evtchn_from_port(d, 126)->state = ECS_UNBOUND;
evtchn_from_port(d, 126)->u.unbound.remote_domid =6;
}
else if(d->domain_id == 1)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 121;
evtchn_from_port(rcu_lock_domain_by_id(0),121)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),121)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),121)->u.interdomain.remote_port = 127;
}
else if(d->domain_id == 2)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 122;
evtchn_from_port(rcu_lock_domain_by_id(0),122)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),122)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),122)->u.interdomain.remote_port = 127;
}
else if(d->domain_id == 3)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 123;
evtchn_from_port(rcu_lock_domain_by_id(0),123)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),123)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),123)->u.interdomain.remote_port = 127;
}
else if(d->domain_id == 4)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 124;
evtchn_from_port(rcu_lock_domain_by_id(0),124)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),124)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),124)->u.interdomain.remote_port = 127;
}
else if(d->domain_id == 5)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 125;
evtchn_from_port(rcu_lock_domain_by_id(0),125)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),125)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),125)->u.interdomain.remote_port = 127;
}
else if(d->domain_id == 6)
{
evtchn_from_port(d, 127)->state = ECS_INTERDOMAIN;
evtchn_from_port(d, 127)->u.interdomain.remote_dom = rcu_lock_domain_by_id(0);
evtchn_from_port(d, 127)->u.interdomain.remote_port = 126;
evtchn_from_port(rcu_lock_domain_by_id(0),126)->state = ECS_INTERDOMAIN;
evtchn_from_port(rcu_lock_domain_by_id(0),126)->u.interdomain.remote_dom = d;
evtchn_from_port(rcu_lock_domain_by_id(0),126)->u.interdomain.remote_port = 127;
}
#if MAX_VIRT_CPUS > BITS_PER_LONG
d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
if ( !d->poll_mask )
return -ENOMEM;
bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
#endif
return 0;
}
在domain 0被创建时预留下事件通道用于域间绑定,在domain U被创建时进行域间绑定操作。
2、建立域间通讯
2.1 dom 0与 dom U事件通道分配
如1.2所示,在xen中已经实现了事件通道的分配:dom 0分配121~126号通道,分别与dom 1~dom 6建立域间通讯;当dom 1~dom 6启动时执行域间绑定过程。
2.2为分配的事件通道绑定处理函数
每一个事件通道处理都对应一个中断,因此一旦事件到达,会根据事件通道的中断号调用对应的中断处理函数,所以在使用之前也要绑定处理函数。其中evtchn为alloc_unbound.port。处理函数handler根据自己想要实现的功能设计。
/*
 * Bind event channel @evtchn to a Linux IRQ and install @handler on it.
 * Returns the IRQ number on success, or a negative errno if either the
 * IRQ mapping or the handler registration fails (the mapping is undone
 * in the latter case).
 */
int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
    int rc;
    int irq = bind_evtchn_to_irq(evtchn);

    if (irq < 0)
        return irq;

    rc = request_irq(irq, handler, irqflags, devname, dev_id);
    if (rc != 0) {
        /* Registration failed: tear down the evtchn<->irq mapping. */
        unbind_from_irq(irq);
        return rc;
    }

    return irq;
}
3、利用事件通道来发送通知
发送事件通知的函数为notify_remote_via_evtchn,这是linux内核中的函数,实际也是通过超级调用请求xen进行的通知。
/*
 * Send an event notification on @port via the EVTCHNOP_send hypercall.
 * Fire-and-forget: the hypercall's return value is deliberately ignored.
 */
static inline void notify_remote_via_evtchn(int port)
{
    struct evtchn_send send;

    send.port = port;
    (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}
4、收到事件通知的处理
每一个事件通道绑定完vcpu后,一旦有事件通知就会触发该vcpu的调度,执行下面的汇编代码
/*
 * Hypervisor event upcall entry point (32-bit). Saves registers, handles
 * the xen_iret critical-region race, then dispatches to
 * xen_evtchn_do_upcall(). Fix: the original ENDPROC line was missing its
 * closing parenthesis and would not assemble.
 */
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
/* Check to see if we got the event in the critical
region in xen_iret_direct, after we've reenabled
events and checked for pending events. This simulates
iret instruction's behaviour where it delivers a
pending interrupt when enabling interrupts. */
movl PT_EIP(%esp),%eax
cmpl $xen_iret_start_crit,%eax
jb 1f
cmpl $xen_iret_end_crit,%eax
jae 1f
jmp xen_iret_crit_fixup
ENTRY(xen_do_upcall)
1: mov %esp, %eax
call xen_evtchn_do_upcall
jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)
在上面的代码中选择处理函数时都会进入xen_evtchn_do_upcall,该函数会根据事件通道的中断号,调用对应的中断处理函数。其实现如下
/*
 * C entry point for the hypervisor event upcall: set up the IRQ context
 * (saving/restoring the previous pt_regs) around the core dispatch loop.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
    struct pt_regs *prev_regs = set_irq_regs(regs);

    irq_enter();
    exit_idle();

    __xen_evtchn_do_upcall();

    irq_exit();
    set_irq_regs(prev_regs);
}
/*
 * Core upcall dispatch loop: scan the two-level pending bitmap in the
 * shared info page (selector word -> per-word event bits) and call the
 * Linux IRQ handler bound to each pending event channel.
 *
 * Fairness: the scan resumes from per-cpu (current_word_idx,
 * current_bit_idx) — the position just past the last event handled —
 * so low-numbered channels cannot starve high-numbered ones.
 *
 * Re-entrancy: nested upcalls only bump xed_nesting_count and return;
 * the outermost invocation loops until no nesting occurred and no new
 * events are pending.
 */
static void __xen_evtchn_do_upcall(void)
{
int start_word_idx, start_bit_idx;
int word_idx, bit_idx;
int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
unsigned count;
do {
unsigned long pending_words;
/* Ack the master pending flag before sampling the selector. */
vcpu_info->evtchn_upcall_pending = 0;
/* Nested entry: record it and let the outer invocation rescan. */
if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
/* Clear master flag /before/ clearing selector flag. */
wmb();
#endif
/* Atomically fetch-and-clear the level-1 selector word. */
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
/* Resume scanning where the previous upcall left off. */
start_word_idx = __this_cpu_read(current_word_idx);
start_bit_idx = __this_cpu_read(current_bit_idx);
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
unsigned long pending_bits;
unsigned long words;
/* Mask off selector bits below the resume position. */
words = MASK_LSBS(pending_words, word_idx);
/*
 * If we masked out all events, wrap to beginning.
 */
if (words == 0) {
word_idx = 0;
bit_idx = 0;
continue;
}
word_idx = __ffs(words);
/* Level-2 bits for this word: pending & ~masked. */
pending_bits = active_evtchns(cpu, s, word_idx);
bit_idx = 0; /* usually scan entire word from start */
if (word_idx == start_word_idx) {
/* We scan the starting word in two parts */
if (i == 0)
/* 1st time: start in the middle */
bit_idx = start_bit_idx;
else
/* 2nd time: mask bits done already */
bit_idx &= (1UL << start_bit_idx) - 1;
}
do {
unsigned long bits;
int port, irq;
struct irq_desc *desc;
bits = MASK_LSBS(pending_bits, bit_idx);
/* If we masked out all events, move on. */
if (bits == 0)
break;
bit_idx = __ffs(bits);
/* Process port. */
port = (word_idx * BITS_PER_LONG) + bit_idx;
irq = evtchn_to_irq[port];
/* -1 means no Linux IRQ is bound to this channel. */
if (irq != -1) {
desc = irq_to_desc(irq);
if (desc)
generic_handle_irq_desc(irq, desc);
}
bit_idx = (bit_idx + 1) % BITS_PER_LONG;
/* Next caller starts at last processed + 1 */
__this_cpu_write(current_word_idx,
bit_idx ? word_idx :
(word_idx+1) % BITS_PER_LONG);
__this_cpu_write(current_bit_idx, bit_idx);
} while (bit_idx != 0);
/* Scan start_l1i twice; all others once. */
if ((word_idx != start_word_idx) || (i != 0))
pending_words &= ~(1UL << word_idx);
word_idx = (word_idx + 1) % BITS_PER_LONG;
}
BUG_ON(!irqs_disabled());
/* Loop again if a nested upcall arrived or new events are pending. */
count = __this_cpu_read(xed_nesting_count);
__this_cpu_write(xed_nesting_count, 0);
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
put_cpu();
}
- Xen事件通道实现通讯设计及说明
- xen的事件通道
- Xen事件通道详细介绍(一)
- Xen事件通道详细介绍(二)
- Xen事件通道详细介绍(三)
- Xen事件通道详细介绍(四)
- xen事件通道机制及其实例
- Xen 超级调用 和 事件通道
- NRF24L01多通道通讯
- rabbitmq 连接、通道及线程池说明和配置
- Xen半虚拟化下,IO共享环、事件通道、授权表之间的联系和区别
- Xen前后端分离设备驱动模型通信过程分析(包括事件通道、授权表、环缓冲区)
- Xen半虚拟化下,IO共享环、事件通道、授权表之间的联系和区别
- [连载]《C#通讯(串口和网络)框架的设计与实现》- 12.二次开发及应用
- 使用事件通道
- java远程通讯及简单实现
- UDP通讯方式及编程实现步骤
- UDP通讯方式及编程实现步骤
- Linux 下CollabNet SubversionEdge 4.X (csvn)的安装
- js下为表格内部动态添加行的代码
- 使用GDB调试程序(2)
- 2014~2015年秋学期总结与展望
- swift学习资源整理
- Xen事件通道实现通讯设计及说明
- 谈谈LoadRunner中的关联
- 解决This Android SDK requires Android Developer Toolkit version 23.0.0 or above和ADT更新失败
- 函数指针函数名转换成内存地址调用
- C/C++ 使用递归算法实现汉诺塔
- 查看Mysql数据库连接状态
- mongoDB的读书笔记(04)_【Replica】(05)_初探Replica set副本集的搭建 By Test模式
- H265封装成TS流
- 利用开源工具Tesseract进行文字识别