- 论坛徽章:
- 0
|
各位好,请教个问题。
我想在2.6.32内核下,在内核态实现netlink的接收与处理信息分开进行。比如,一个函数收到用户发来的netlink后只负责唤醒处理线程,然后就返回。所有的处理工作交给处理线程去处理,这样就可以让用户态可以不停快速地发netlink消息到内核。看到一个在老的内核中实现的代码,如下:
/* Size of the staging buffer that accumulates payloads of received netlink messages. */
#define BUF_SIZE 16384
static struct sock *netlink_exam_sock;      /* kernel-side netlink socket created in init */
static unsigned char buffer[BUF_SIZE];      /* staging area for received message payloads */
static unsigned int buffer_tail = 0;        /* next free offset into buffer[] */
static int exit_flag = 0;                   /* set to 1 by module exit to stop the worker thread */
static DECLARE_COMPLETION(exit_completion); /* signalled by the worker thread when it terminates */
static void recv_handler(struct sock * sk, int length)
{
wake_up(sk->sk_sleep);
}
/*
 * Worker thread: sleeps on the netlink socket's wait queue until
 * recv_handler() wakes it, then drains sk_receive_queue.  Each message
 * payload is appended to buffer[] (dropped with a log message if the
 * buffer would overflow) and the message is re-broadcast to multicast
 * group 1 with kernel-originated header fields.  Terminates when
 * exit_flag is set and signals exit_completion so module unload can
 * proceed.
 *
 * @data: unused thread argument.
 * Returns 0 on thread exit.
 */
static int process_message_thread(void *data)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlhdr;
	unsigned int len;
	DEFINE_WAIT(wait);

	daemonize("mynetlink");
	while (exit_flag == 0) {
		/* Sleep until recv_handler()'s wake_up() on sk_sleep. */
		prepare_to_wait(netlink_exam_sock->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(netlink_exam_sock->sk_sleep, &wait);

		while ((skb = skb_dequeue(&netlink_exam_sock->sk_receive_queue)) != NULL) {
			nlhdr = (struct nlmsghdr *)skb->data;
			/*
			 * Validate nlmsg_len both against the aligned header
			 * length (so the subtraction below cannot underflow)
			 * and against the actual data in the skb (so a bogus
			 * nlmsg_len cannot make memcpy read past the end of
			 * the buffer).
			 */
			if (nlhdr->nlmsg_len < NLMSG_LENGTH(0) ||
			    nlhdr->nlmsg_len > skb->len) {
				printk("Corrupt netlink message.\n");
				kfree_skb(skb); /* fix: original leaked the skb here */
				continue;
			}
			len = nlhdr->nlmsg_len - NLMSG_LENGTH(0);
			if (len + buffer_tail > BUF_SIZE) {
				printk("netlink buffer is full.\n");
			} else {
				memcpy(buffer + buffer_tail, NLMSG_DATA(nlhdr), len);
				buffer_tail += len;
			}
			/* Rewrite the header so the echo appears kernel-originated. */
			nlhdr->nlmsg_pid = 0;   /* sender pid 0 == kernel */
			nlhdr->nlmsg_flags = 0;
			NETLINK_CB(skb).pid = 0;
			NETLINK_CB(skb).dst_pid = 0;
			NETLINK_CB(skb).dst_group = 1;
			/* netlink_broadcast() consumes the skb on all paths. */
			netlink_broadcast(netlink_exam_sock, skb, 0, 1, GFP_KERNEL);
		}
	}
	complete(&exit_completion);
	return 0;
}
/*
 * Module init: create the kernel-side NETLINK_GENERIC socket and spawn
 * the worker thread that processes incoming messages.
 *
 * Returns 0 on success, -ENOMEM if the netlink socket cannot be created.
 *
 * NOTE(review): this is the pre-2.6.24 4-argument form of
 * netlink_kernel_create().  On 2.6.32 the prototype is
 *   netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 *                         void (*input)(struct sk_buff *skb),
 *                         struct mutex *cb_mutex, struct module *module)
 * and the input callback receives the skb directly; the skb is NOT left
 * on sk_receive_queue (the caller frees it after input returns), which
 * is why skb_dequeue() in the worker finds nothing.  On 2.6.32 the
 * input callback must skb_get() and queue the skb itself -- confirm
 * against the target tree before porting.
 */
static int __init netlink_exam_init(void)
{
	netlink_exam_sock = netlink_kernel_create(NETLINK_GENERIC, 0, recv_handler, THIS_MODULE);
	if (!netlink_exam_sock) {
		printk("Fail to create netlink socket.\n");
		return -ENOMEM; /* fix: module init must return a negative errno, not 1 */
	}
	kernel_thread(process_message_thread, NULL, CLONE_KERNEL);
	return 0;
}
/*
 * Module exit: ask the worker thread to stop, wait for it to finish,
 * then release the netlink socket.
 *
 * NOTE(review): there is a latent lost-wakeup race -- if the thread has
 * just tested exit_flag but not yet called prepare_to_wait(), this
 * wake_up() is lost and the thread sleeps until the next netlink
 * message arrives; confirm whether that is acceptable for unload.
 */
static void __exit netlink_exam_exit(void)
{
/* Order matters: set the flag first so the woken thread sees it. */
exit_flag = 1;
wake_up(netlink_exam_sock->sk_sleep);
/* Block until process_message_thread() calls complete(). */
wait_for_completion(&exit_completion);
sock_release(netlink_exam_sock->sk_socket);
}
但是目前内核的函数API都已经改变了：extern struct sock *netlink_kernel_create(struct net *net, int unit, unsigned int groups, void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module); 现在 void (*input)(struct sk_buff *skb) 的入参是 sk_buff，而不是以前的 struct sock *sk，
于是我将input函数修改如下:
static void recv_handler(struct sk_buff *skb)
{
wake_up(skb->sk->sk_sleep);
}
然后我在测试中发现,当用户层发送netlink到内核时,这个函数会执行并唤醒内核线程处理函数process_message_thread,在这个函数里每次都是执行到
while ((skb = skb_dequeue(&netlink_exam_sock->sk_receive_queue))
!= NULL) {
这里的时候,然后判断出发现skb是null,然后就退出了,所以一直无法处理netlink,请问各位大侠,在2.6.32内核下,该如何实现这个机制。
|
|