Date: 2008-03-22
From: Manfred Spraul
Subject: Re: Scalability requirements for sysv ipc
Mike Galbraith wrote:
> taskset -c 3 ./ctx -s
>
> 2.6.24.3
> 3766962 itterations in 9.999845 seconds = 376734/sec
>
> 2.6.22.18-cfs-v24.1
> 4375920 itterations in 10.006199 seconds = 437330/sec
>
> for i in 0 1 2 3; do taskset -c $i ./ctx -s& done
>
> 2.6.22.18-cfs-v24.1
> 4355784 itterations in 10.005670 seconds = 435361/sec
> 4396033 itterations in 10.005686 seconds = 439384/sec
> 4390027 itterations in 10.006511 seconds = 438739/sec
> 4383906 itterations in 10.006834 seconds = 438128/sec
>
> 2.6.24.3
> 1269937 itterations in 9.999757 seconds = 127006/sec
> 1266723 itterations in 9.999663 seconds = 126685/sec
> 1267293 itterations in 9.999348 seconds = 126742/sec
> 1265793 itterations in 9.999766 seconds = 126592/sec
>
>
Ouch - 71% slowdown with just 4 cores. Wow.
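(For reference, that figure follows from the four-instance rates quoted above: the per-instance rate drops from about 437000/sec on 2.6.22.18-cfs-v24.1 to about 127000/sec on 2.6.24.3, i.e. 1 - 127006/437330 is roughly 0.71.)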
Attached are my own testapps: one for sysv msg, one for sysv sem.
Could you run them? The taskset step is done internally; just execute

$ for i in 1 2 3 4;do ./psem $i 5;./pmsg $i 5;done

Only tested on a uniprocessor; I hope the pthread_setaffinity() calls work as
expected....
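
If it helps, a small driver along the following lines should work. This is only a
sketch: it assumes the attached files are built with g++ and linked against
pthreads, and it keeps just the "Total:" line, which is the last line each
program prints.

$ g++ -O2 -o pmsg pmsg.cpp -lpthread
$ g++ -O2 -o psem psem.cpp -lpthread
$ for i in 1 2 3 4; do echo -n "psem $i: "; ./psem $i 5 | tail -n 1; echo -n "pmsg $i: "; ./pmsg $i 5 | tail -n 1; done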

--
Manfred
/*
 * pmsg.cpp, parallel sysv msg pingpong
 *
 * Copyright (C) 1999, 2001, 2005, 2008 by Manfred Spraul.
 * All rights reserved except the rights granted by the GPL.
 *
 * Redistribution of this file is permitted under the terms of the GNU
 * General Public License (GPL) version 2 or later.
 * $Header$
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <pthread.h>

//////////////////////////////////////////////////////////////////////////////

static enum {
        WAITING,
        RUNNING,
        STOPPED,
} volatile g_state = WAITING;

unsigned long long *g_results;
int *g_svmsg_ids;
pthread_t *g_threads;

struct taskinfo {
        int svmsg_id;
        int threadid;
        int sender;
};

#define DATASIZE 8
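
/*
 * Test scheme: each message queue is shared by a sender/receiver thread
 * pair that ping-pongs a single DATASIZE-byte message.  The sender sends
 * messages of mtype 2 and waits for mtype 1; the receiver does the
 * opposite.  Every thread counts its completed receive/send pairs in
 * "rounds".
 */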

void* worker_thread(void *arg)
{
        struct taskinfo *ti = (struct taskinfo*)arg;
        unsigned long long rounds;
        int ret;
        struct {
                long mtype;
                char buffer[DATASIZE];
        } mbuf;

        {
                cpu_set_t cpus;
                CPU_ZERO(&cpus);
                CPU_SET(ti->threadid/2, &cpus);
                printf("ti: %d %lxh\n", ti->threadid/2, cpus.__bits[0]);

                /* pthread_{set,get}affinity_np return an error number; they do not set errno */
                ret = pthread_setaffinity_np(g_threads[ti->threadid], sizeof(cpus), &cpus);
                if (ret != 0) {
                        printf("pthread_setaffinity_np failed for thread %d with error %d.\n",
                                        ti->threadid, ret);
                }

                ret = pthread_getaffinity_np(g_threads[ti->threadid], sizeof(cpus), &cpus);
                if (ret != 0) {
                        printf("pthread_getaffinity_np() failed for thread %d with error %d.\n",
                                        ti->threadid, ret);
                        fflush(stdout);
                } else {
                        printf("thread %d: sysvmsg %8d type %d bound to %lxh\n", ti->threadid,
                                        ti->svmsg_id, ti->sender, cpus.__bits[0]);
                }
                fflush(stdout);
        }

        rounds = 0;
        while (g_state == WAITING) {
#ifdef __i386__
                __asm__ __volatile__("pause": : :"memory");
#endif
        }

        if (ti->sender) {
                mbuf.mtype = ti->sender+1;
                ret = msgsnd(ti->svmsg_id, &mbuf, DATASIZE, 0);
                if (ret != 0) {
                        printf("Initial send failed, errno %d.\n", errno);
                        exit(1);
                }
        }
        while (g_state == RUNNING) {
                int target = 1+!ti->sender;

                ret = msgrcv(ti->svmsg_id, &mbuf, DATASIZE, target, 0);
                if (ret != DATASIZE) {
                        if (errno == EIDRM)
                                break;
                        printf("Error on msgrcv, got %d, errno %d.\n", ret, errno);
                        exit(1);
                }
                mbuf.mtype = ti->sender+1;
                ret = msgsnd(ti->svmsg_id, &mbuf, DATASIZE, 0);
                if (ret != 0) {
                        if (errno == EIDRM)
                                break;
                        printf("send failed, errno %d.\n", errno);
                        exit(1);
                }
                rounds++;
        }
        /* store result */
        g_results[ti->threadid] = rounds;

        pthread_exit(0);
        return NULL;
}

void init_thread(int thread1, int thread2)
{
        int ret;
        struct taskinfo *ti1, *ti2;

        ti1 = new (struct taskinfo);
        ti2 = new (struct taskinfo);
        if (!ti1 || !ti2) {
                printf("Could not allocate task info\n");
                exit(1);
        }

        g_svmsg_ids[thread1] = msgget(IPC_PRIVATE, 0777 | IPC_CREAT);
        if (g_svmsg_ids[thread1] == -1) {
                printf(" message queue create failed.\n");
                exit(1);
        }
        ti1->svmsg_id = g_svmsg_ids[thread1];
        ti2->svmsg_id = ti1->svmsg_id;
        ti1->threadid = thread1;
        ti2->threadid = thread2;
        ti1->sender = 1;
        ti2->sender = 0;

        ret = pthread_create(&g_threads[thread1], NULL, worker_thread, ti1);
        if (ret) {
                printf(" pthread_create failed with error code %d\n", ret);
                exit(1);
        }
        ret = pthread_create(&g_threads[thread2], NULL, worker_thread, ti2);
        if (ret) {
                printf(" pthread_create failed with error code %d\n", ret);
                exit(1);
        }
}

//////////////////////////////////////////////////////////////////////////////

int main(int argc, char **argv)
{
        int queues, timeout;
        unsigned long long totals;
        int i;

        printf("pmsg [nr queues] [timeout]\n");
        if (argc != 3) {
                printf(" Invalid parameters.\n");
                return 0;
        }
        queues = atoi(argv[1]);
        timeout = atoi(argv[2]);
        printf("Using %d queues (%d threads) for %d seconds.\n",
                        queues, 2*queues, timeout);

        g_results = new unsigned long long[2*queues];
        g_svmsg_ids = new int[queues];
        g_threads = new pthread_t[2*queues];
        for (i = 0; i < queues; i++) {
                g_results[i] = 0;
                g_results[i+queues] = 0;
                init_thread(i, i+queues);
        }

        sleep(1);
        g_state = RUNNING;
        sleep(timeout);
        g_state = STOPPED;
        sleep(1);
        for (i = 0; i < queues; i++) {
                int res;
                res = msgctl(g_svmsg_ids[i], IPC_RMID, NULL);
                if (res < 0) {
                        printf("msgctl(IPC_RMID) failed for %d, errno %d.\n",
                                        g_svmsg_ids[i], errno);
                }
        }
        for (i = 0; i < 2*queues; i++)
                pthread_join(g_threads[i], NULL);

        printf("Result matrix:\n");
        totals = 0;
        for (i = 0; i < queues; i++) {
                printf(" Thread %3d: %8lld %3d: %8lld\n",
                                i, g_results[i], i+queues, g_results[i+queues]);
                totals += g_results[i] + g_results[i+queues];
        }
        printf("Total: %lld\n", totals);
}
/*
 * psem.cpp, parallel sysv sem pingpong
 *
 * Copyright (C) 1999, 2001, 2005, 2008 by Manfred Spraul.
 * All rights reserved except the rights granted by the GPL.
 *
 * Redistribution of this file is permitted under the terms of the GNU
 * General Public License (GPL) version 2 or later.
 * $Header$
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <pthread.h>

//////////////////////////////////////////////////////////////////////////////

static enum {
        WAITING,
        RUNNING,
        STOPPED,
} volatile g_state = WAITING;

unsigned long long *g_results;
int *g_svsem_ids;
pthread_t *g_threads;

struct taskinfo {
        int svsem_id;
        int threadid;
        int sender;
};

#define DATASIZE 8
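
/*
 * Test scheme: each semaphore set contains two semaphores and is shared by
 * a sender/receiver thread pair.  A single token bounces between the two
 * semaphores: the sender waits on semaphore 1 and posts semaphore 0, the
 * receiver waits on semaphore 0 and posts semaphore 1.  Every thread counts
 * its completed wait/post pairs in "rounds".
 */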

void* worker_thread(void *arg)
{
        struct taskinfo *ti = (struct taskinfo*)arg;
        unsigned long long rounds;
        int ret;

        {
                cpu_set_t cpus;
                CPU_ZERO(&cpus);
                CPU_SET(ti->threadid/2, &cpus);
                printf("ti: %d %lxh\n", ti->threadid/2, cpus.__bits[0]);

                /* pthread_{set,get}affinity_np return an error number; they do not set errno */
                ret = pthread_setaffinity_np(g_threads[ti->threadid], sizeof(cpus), &cpus);
                if (ret != 0) {
                        printf("pthread_setaffinity_np failed for thread %d with error %d.\n",
                                        ti->threadid, ret);
                }

                ret = pthread_getaffinity_np(g_threads[ti->threadid], sizeof(cpus), &cpus);
                if (ret != 0) {
                        printf("pthread_getaffinity_np() failed for thread %d with error %d.\n",
                                        ti->threadid, ret);
                        fflush(stdout);
                } else {
                        printf("thread %d: sysvsem %8d type %d bound to %lxh\n", ti->threadid,
                                        ti->svsem_id, ti->sender, cpus.__bits[0]);
                }
                fflush(stdout);
        }

        rounds = 0;
        while (g_state == WAITING) {
#ifdef __i386__
                __asm__ __volatile__("pause": : :"memory");
#endif
        }

        if (ti->sender) {
                struct sembuf sop[1];
                int res;

                /* 1) insert token */
                sop[0].sem_num = 0;
                sop[0].sem_op = 1;
                sop[0].sem_flg = 0;
                res = semop(ti->svsem_id, sop, 1);
                if (res != 0) {
                        printf("Initial semop failed, errno %d.\n", errno);
                        exit(1);
                }
        }
        while (g_state == RUNNING) {
                struct sembuf sop[1];
                int res;

                /* 1) retrieve token */
                sop[0].sem_num = ti->sender;
                sop[0].sem_op = -1;
                sop[0].sem_flg = 0;
                res = semop(ti->svsem_id, sop, 1);
                if (res != 0) {
                        /* EIDRM can happen */
                        if (errno == EIDRM)
                                break;
                        printf("main semop failed, errno %d.\n", errno);
                        exit(1);
                }

                /* 2) reinsert token */
                sop[0].sem_num = 1-ti->sender;
                sop[0].sem_op = 1;
                sop[0].sem_flg = 0;
                res = semop(ti->svsem_id, sop, 1);
                if (res != 0) {
                        /* EIDRM can happen */
                        if (errno == EIDRM)
                                break;
                        printf("main semop failed, errno %d.\n", errno);
                        exit(1);
                }

                rounds++;
        }
        g_results[ti->threadid] = rounds;

        pthread_exit(0);
        return NULL;
}

void init_thread(int thread1, int thread2)
{
        int ret;
        struct taskinfo *ti1, *ti2;

        ti1 = new (struct taskinfo);
        ti2 = new (struct taskinfo);
        if (!ti1 || !ti2) {
                printf("Could not allocate task info\n");
                exit(1);
        }
        g_svsem_ids[thread1] = semget(IPC_PRIVATE, 2, 0777 | IPC_CREAT);
        if (g_svsem_ids[thread1] == -1) {
                printf(" semaphore array create failed.\n");
                exit(1);
        }
        ti1->svsem_id = g_svsem_ids[thread1];
        ti2->svsem_id = ti1->svsem_id;
        ti1->threadid = thread1;
        ti2->threadid = thread2;
        ti1->sender = 1;
        ti2->sender = 0;

        ret = pthread_create(&g_threads[thread1], NULL, worker_thread, ti1);
        if (ret) {
                printf(" pthread_create failed with error code %d\n", ret);
                exit(1);
        }
        ret = pthread_create(&g_threads[thread2], NULL, worker_thread, ti2);
        if (ret) {
                printf(" pthread_create failed with error code %d\n", ret);
                exit(1);
        }
}

//////////////////////////////////////////////////////////////////////////////

int main(int argc, char **argv)
{
        int queues, timeout;
        unsigned long long totals;
        int i;

        printf("psem [nr queues] [timeout]\n");
        if (argc != 3) {
                printf(" Invalid parameters.\n");
                return 0;
        }
        queues = atoi(argv[1]);
        timeout = atoi(argv[2]);
        printf("Using %d queues (%d threads) for %d seconds.\n",
                        queues, 2*queues, timeout);

        g_results = new unsigned long long[2*queues];
        g_svsem_ids = new int[queues];
        g_threads = new pthread_t[2*queues];
        for (i = 0; i < queues; i++) {
                g_results[i] = 0;
                g_results[i+queues] = 0;
                init_thread(i, i+queues);
        }

        sleep(1);
        g_state = RUNNING;
        sleep(timeout);
        g_state = STOPPED;
        sleep(1);
        for (i = 0; i < queues; i++) {
                int res;
                res = semctl(g_svsem_ids[i], 1, IPC_RMID, NULL);
                if (res < 0) {
                        printf("semctl(IPC_RMID) failed for %d, errno %d.\n",
                                        g_svsem_ids[i], errno);
                }
        }
        for (i = 0; i < 2*queues; i++)
                pthread_join(g_threads[i], NULL);

        printf("Result matrix:\n");
        totals = 0;
        for (i = 0; i < queues; i++) {
                printf(" Thread %3d: %8lld %3d: %8lld\n",
                                i, g_results[i], i+queues, g_results[i+queues]);
                totals += g_results[i] + g_results[i+queues];
        }
        printf("Total: %lld\n", totals);
}