- 论坛徽章:
- 0
|
本人最近在学习Linux上的编程,对于多核CPU,利用处理器亲和性,试了一下进程与CPU的绑定。对以前的命名管道通信的例子进行改进,让其与CPU绑定。
原命名管道例子有两个进程客户端和服务器端:
代码见附件如下:
client.h
#include<unistd.h>
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<fcntl.h>
#include<limits.h>
#include<sys/types.h>
#include<sys/stat.h>
#define SERVER_FIFO_NAME "/tmp/serv_fifo"
#define CLIENT_FIFO_NAME "/tmp/cli_%d_fifo"
#define BUFFER_SIZE 20
#ifndef DEBUG
#define DEBUG 1
#endif
/* Request/response record exchanged over the FIFOs.
 * The client fills in its own PID so the server can build the per-client
 * reply FIFO name from CLIENT_FIFO_NAME ("/tmp/cli_%d_fifo").
 * sizeof(struct data_to_pass_st) is well under PIPE_BUF, so each
 * read/write of one record on a FIFO is atomic. */
struct data_to_pass_st
{
pid_t client_pid; /* sender's PID; selects the reply FIFO */
char some_data[BUFFER_SIZE + 1]; /* NUL-terminated payload (+1 for the terminator) */
};
服务器端:server.c
#include "client.h"
#include "debug_rocky.h"
#include <ctype.h>
/*
 * FIFO server: reads data_to_pass_st requests from the well-known server
 * FIFO, upper-cases the payload in place, and writes the result back
 * through the per-client FIFO named after the client's PID.
 * Exits when read() reports EOF (all client write ends closed).
 */
int main()
{
    int server_fifo_fd, client_fifo_fd;
    struct data_to_pass_st my_data;
    int read_res;
    char client_fifo[256];
    char *tmp_char_ptr;

    /* EEXIST from a previous run is harmless; any other mkfifo failure
     * surfaces as an open() error just below. */
    mkfifo(SERVER_FIFO_NAME, 0777);

    /* Blocks until at least one client opens the FIFO for writing. */
    server_fifo_fd = open(SERVER_FIFO_NAME, O_RDONLY);
    if (server_fifo_fd == -1)
    {
        fprintf(stderr, "Server fifo failure\n");
        exit(EXIT_FAILURE);
    }

    do
    {
        read_res = read(server_fifo_fd, &my_data, sizeof(my_data));
        if (read_res > 0)
        {
            /* Upper-case the payload.  Cast through unsigned char:
             * passing a negative plain char to toupper() is undefined
             * behaviour. */
            tmp_char_ptr = my_data.some_data;
            while (*tmp_char_ptr)
            {
                *tmp_char_ptr = toupper((unsigned char)*tmp_char_ptr);
                tmp_char_ptr++;
            }

            sprintf(client_fifo, CLIENT_FIFO_NAME, my_data.client_pid);
            client_fifo_fd = open(client_fifo, O_WRONLY);
            if (client_fifo_fd != -1)
            {
                /* BUG FIX: the original ignored write()'s result.  If the
                 * client closes the read end between our open() and this
                 * write() (the open/close rendezvous race across loop
                 * iterations — the likely source of the reported "Broken
                 * pipe"), write() fails with EPIPE, and unless SIGPIPE is
                 * ignored the signal kills this process.  Checking the
                 * return at least reports the failure; additionally
                 * ignoring SIGPIPE (see signal(2)) would make the loss of
                 * one reply fully recoverable. */
                if (write(client_fifo_fd, &my_data, sizeof(my_data))
                        != (ssize_t)sizeof(my_data))
                {
                    fprintf(stderr, "write to %s failed\n", client_fifo);
                }
                close(client_fifo_fd);
            }
        }
    } while (read_res > 0);

    close(server_fifo_fd);
    unlink(SERVER_FIFO_NAME);
    exit(EXIT_SUCCESS);
}
客户端:client.c
#include "client.h"
#include "debug_rocky.h"
#include <ctype.h>
/*
 * FIFO client: sends 1,000,000 numbered requests into the server FIFO
 * and reads each upper-cased reply back from a private per-PID FIFO,
 * which it creates on startup and unlinks on exit.
 */
int main()
{
    int server_fifo_fd, client_fifo_fd;
    struct data_to_pass_st my_data;
    int times_to_send;
    char client_fifo[256];

    /* O_WRONLY open fails immediately only if the FIFO does not exist,
     * i.e. no server ever ran; otherwise it blocks until the server
     * opens its read end. */
    server_fifo_fd = open(SERVER_FIFO_NAME, O_WRONLY);
    if (server_fifo_fd == -1)
    {
        fprintf(stderr, "Sorry, no server\n");
        exit(EXIT_FAILURE);
    }

    my_data.client_pid = getpid();
    sprintf(client_fifo, CLIENT_FIFO_NAME, my_data.client_pid);
    if (mkfifo(client_fifo, 0777) == -1)
    {
        fprintf(stderr, "Sorry, can't make %s\n", client_fifo);
        exit(EXIT_FAILURE);
    }

    for (times_to_send = 0; times_to_send < 1000000; times_to_send++)
    {
        /* The loop counter is the payload; the separate counter 'j' in
         * the original always mirrored times_to_send and was removed. */
        sprintf(my_data.some_data, "%d", times_to_send);

        /* BUG FIX: the original ignored write()'s result; if the server
         * goes away mid-run this write raises SIGPIPE / fails with
         * EPIPE, so detect it instead of looping on a dead FIFO. */
        if (write(server_fifo_fd, &my_data, sizeof(my_data))
                != (ssize_t)sizeof(my_data))
        {
            fprintf(stderr, "write to server fifo failed\n");
            break;
        }

        /* Blocks until the server opens our FIFO for writing. */
        client_fifo_fd = open(client_fifo, O_RDONLY);
        if (client_fifo_fd != -1)
        {
            if (read(client_fifo_fd, &my_data, sizeof(my_data)) > 0)
            {
                /* Reply received; printing stays suppressed exactly as
                 * in the original (the commented-out printf). */
            }
            close(client_fifo_fd);
        }
        else
        {
            printf("client_fifo_fd ERROR \n");
        }
    }

    close(server_fifo_fd);
    unlink(client_fifo);
    exit(EXIT_SUCCESS);
}
运行结果正常。只是在client.c中,有时候将printf的打印信息去掉后,出现:Broken Pipe的错误。用GDB调试了一下,发现在server.c的“write(client_fifo_fd, &my_data, sizeof(my_data));”出错。查了一些资料,当管道的读端已经全部关闭时再调用write写入,内核会向写进程发送SIGPIPE信号(write返回EPIPE错误),默认动作就是终止进程并报Broken Pipe。可是在此处管道应该没有关闭。而将printf打印信息加上后(时序被拉慢),没有出现错误,说明这很可能是客户端open/close与服务器端open/write之间的时序竞争。希望高手给予指点。
对其进行改造,实现处理器绑定:
client_cpu.h
#include<unistd.h>
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<fcntl.h>
#include<limits.h>
#include<sys/types.h>
#include<sys/stat.h>
#include<sys/types.h>
#include<sys/sysinfo.h>
#define SERVER_FIFO_NAME "/tmp/serv_fifo"
#define CLIENT_FIFO_NAME "/tmp/cli_%d_fifo"
#define BUFFER_SIZE 20
/* Request/response record exchanged over the FIFOs (same layout as the
 * non-affinity version in client.h).  The client fills in its own PID so
 * the server can build the per-client reply FIFO name from
 * CLIENT_FIFO_NAME ("/tmp/cli_%d_fifo"). */
struct data_to_pass_st
{
pid_t client_pid; /* sender's PID; selects the reply FIFO */
char some_data[BUFFER_SIZE + 1]; /* NUL-terminated payload (+1 for the terminator) */
};
服务器端server_cpu.c
#include "client_cpu.h"
#define __USE_GNU
#include <sched.h>
#include <ctype.h>
/*
 * CPU-pinned FIFO server: binds itself to the CPU number given as
 * argv[1] via sched_setaffinity(), reports which CPU(s) it may run on,
 * then serves upper-case requests exactly like server.c.
 */
int main(int argc, char *argv[])
{
    int num = sysconf(_SC_NPROCESSORS_CONF); /* BUG FIX: stray ';;' removed */
    int server_fifo_fd, client_fifo_fd;
    struct data_to_pass_st my_data;
    int read_res;
    char client_fifo[256];
    char *tmp_char_ptr;
    int cpu_id;
    int cpu_gid;
    int pid;
    cpu_set_t mask;
    cpu_set_t get;

    if (argc != 2)
    {
        printf("usage : ./server_cpu num\n");
        exit(1);
    }
    cpu_id = atoi(argv[1]);

    /* Pin this process to the requested CPU; a failure (e.g. cpu_id out
     * of range) is reported but not fatal. */
    CPU_ZERO(&mask);
    CPU_SET(cpu_id, &mask);
    if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
    {
        printf("warning: could not set CPU affinity, continuing...\n");
    }

    /* Read the affinity mask back and report every CPU we may run on. */
    pid = getpid();
    CPU_ZERO(&get);
    if (sched_getaffinity(0, sizeof(get), &get) == -1)
    {
        /* BUG FIX: "cound" -> "could" in the warning text. */
        printf("warning: could not get cpu affinity, continuing...\n");
    }
    for (cpu_gid = 0; cpu_gid < num; cpu_gid++)
    {
        if (CPU_ISSET(cpu_gid, &get))
        {
            printf("system has %i processor(s). %d is running on processor %d\n", num, pid, cpu_gid);
        }
    }

    /* EEXIST from a previous run is harmless; other mkfifo failures
     * surface as an open() error below. */
    mkfifo(SERVER_FIFO_NAME, 0777);
    server_fifo_fd = open(SERVER_FIFO_NAME, O_RDONLY);
    if (server_fifo_fd == -1)
    {
        fprintf(stderr, "Server fifo failure\n");
        exit(EXIT_FAILURE);
    }

    do
    {
        read_res = read(server_fifo_fd, &my_data, sizeof(my_data));
        if (read_res > 0)
        {
            /* Upper-case the payload; cast through unsigned char to
             * avoid UB on negative plain char. */
            tmp_char_ptr = my_data.some_data;
            while (*tmp_char_ptr)
            {
                *tmp_char_ptr = toupper((unsigned char)*tmp_char_ptr);
                tmp_char_ptr++;
            }

            sprintf(client_fifo, CLIENT_FIFO_NAME, my_data.client_pid);
            client_fifo_fd = open(client_fifo, O_WRONLY);
            if (client_fifo_fd != -1)
            {
                /* BUG FIX: check write()'s result.  The reported "Broken
                 * pipe" here is SIGPIPE/EPIPE: the client closed its
                 * FIFO's read end between our open() and this write()
                 * (timing shifted by pinning the two processes to
                 * different cores).  Checking the return reports the
                 * failure; ignoring SIGPIPE would also keep the process
                 * alive through it. */
                if (write(client_fifo_fd, &my_data, sizeof(my_data))
                        != (ssize_t)sizeof(my_data))
                {
                    fprintf(stderr, "write to %s failed\n", client_fifo);
                }
                close(client_fifo_fd);
            }
        }
    } while (read_res > 0);

    close(server_fifo_fd);
    unlink(SERVER_FIFO_NAME);
    exit(EXIT_SUCCESS);
}
客户端:client_cpu.c
#include "client_cpu.h"
#define __USE_GNU
#include <sched.h>
#include <ctype.h>
/*
 * CPU-pinned FIFO client: binds itself to the CPU number given as
 * argv[1], then sends 1,000,000 numbered requests to the server FIFO and
 * reads each upper-cased reply back from a private per-PID FIFO.
 */
int main(int argc, char *argv[])
{
    int server_fifo_fd, client_fifo_fd;
    struct data_to_pass_st my_data;
    int times_to_send;
    char client_fifo[256];
    int cpu_id;
    int cpu_gid;
    int pid;
    int num;
    int j = 0;
    cpu_set_t mask;
    cpu_set_t get;

    if (argc != 2)
    {
        printf("usage : ./client_cpu num\n");
        exit(1);
    }
    cpu_id = atoi(argv[1]);
    num = sysconf(_SC_NPROCESSORS_CONF);
    pid = getpid();

    /* Pin this process to the requested CPU; failure is non-fatal. */
    CPU_ZERO(&mask);
    CPU_SET(cpu_id, &mask);
    if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
    {
        printf("warning: could not set CPU affinity, continuing...\n");
    }

    /* Read the mask back and report every CPU we may run on. */
    CPU_ZERO(&get);
    if (sched_getaffinity(0, sizeof(get), &get) == -1)
    {
        /* BUG FIX: "cound" -> "could" in the warning text. */
        printf("warning: could not get cpu affinity, continuing...\n");
    }
    for (cpu_gid = 0; cpu_gid < num; cpu_gid++)
    {
        if (CPU_ISSET(cpu_gid, &get))
        {
            printf("system has %i processor(s). %d is running on processor %d\n", num, pid, cpu_gid);
        }
    }

    server_fifo_fd = open(SERVER_FIFO_NAME, O_WRONLY);
    if (server_fifo_fd == -1)
    {
        fprintf(stderr, "Sorry, no server\n");
        exit(EXIT_FAILURE);
    }

    my_data.client_pid = pid;
    sprintf(client_fifo, CLIENT_FIFO_NAME, my_data.client_pid);
    if (mkfifo(client_fifo, 0777) == -1)
    {
        fprintf(stderr, "Sorry, can't make %s\n", client_fifo);
        exit(EXIT_FAILURE);
    }

    for (times_to_send = 0; times_to_send < 1000000; times_to_send++)
    {
        sprintf(my_data.some_data, "%d", j);

        /* BUG FIX: check write()'s result instead of ignoring it, so a
         * vanished server (EPIPE) is detected rather than looped over. */
        if (write(server_fifo_fd, &my_data, sizeof(my_data))
                != (ssize_t)sizeof(my_data))
        {
            fprintf(stderr, "write to server fifo failed\n");
            break;
        }
        /* BUG FIX: the original printed cpu_gid here, but after the
         * report loop above cpu_gid is always num (one past the last
         * CPU), so the trace was wrong.  Print the CPU we asked to be
         * bound to instead. */
        printf("%d running on %d, sent %s,", my_data.client_pid, cpu_id, my_data.some_data);

        client_fifo_fd = open(client_fifo, O_RDONLY);
        if (client_fifo_fd != -1)
        {
            if (read(client_fifo_fd, &my_data, sizeof(my_data)) > 0)
            {
                printf("received : %s \n", my_data.some_data);
            }
            close(client_fifo_fd);
        }
        j++;
    }

    close(server_fifo_fd);
    unlink(client_fifo);
    exit(EXIT_SUCCESS);
}
在双核上,运行结果却出人意料,让server_cpu运行在核0上,client_cpu运行在核1上,运行总是出现Broken Pipe错误。GDB定位后,还是在server_cpu.c 的“write(client_fifo_fd, &my_data, sizeof(my_data));”出错。希望高手指点。 |
|