Hello Community,
I am trying to figure out a simple way to approximate the cost of a context switch for a given OS/hardware combination under different loads.
For example, on my 2 GHz dual-core Mac OS X machine, a single instance clocks in at about 0.5 usec per yield, whereas three concurrent instances clock in at about 1.3 usec. Below is a sample; feedback is appreciated. Is this a valid approximation? If not, why not? The code below is compiled and tested on Mac OS X.
#include <iostream>
#include <cstdint>    // int64_t
#include <cstdlib>    // atoll
#include <sched.h>    // sched_yield
#include <sys/time.h> // gettimeofday
// Simple wall-clock timer based on gettimeofday(), microsecond resolution.
class Timer
{
public:
    void start()
    {
        gettimeofday(&_start, NULL);
    }

    // Elapsed wall-clock time in microseconds since start().
    double elapsed()
    {
        timeval stop;
        gettimeofday(&stop, NULL);
        const int64_t USEC = 1000000;
        int64_t t1 = _start.tv_sec * USEC + _start.tv_usec;
        int64_t t2 = stop.tv_sec * USEC + stop.tv_usec;
        return double(t2 - t1);
    }

private:
    timeval _start;
};
// Call sched_yield() `loop` times and report the average cost per call in usec.
void yield(int64_t loop)
{
    Timer timer;
    timer.start();
    for (int64_t i = 0; i < loop; i++)
    {
        sched_yield();
    }
    double elapsed = timer.elapsed();
    double avg = elapsed / loop;
    std::cout << "yield loop=[" << loop << "] avg=[" << avg << "]" << std::endl;
}
int main(int argc, char *argv[])
{
    int64_t loop = 100000000; // default: 100 million yields
    if (argc > 1) loop = atoll(argv[1]); // atoll, not atoi, so large counts are not truncated
    yield(loop);
    return 0;
}
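For comparison, one way to sanity-check the sched_yield() numbers would be to force switches explicitly, for example by ping-ponging a single byte between a parent and a child process over a pair of pipes and dividing the elapsed time by the number of round trips. The sketch below is only an illustration of that idea; the loop count, the pipe plumbing, and the output format are arbitrary choices of mine, not measured or tuned:
#include <unistd.h>   // pipe, fork, read, write
#include <sys/time.h> // gettimeofday
#include <cstdio>
#include <cstdint>

int main()
{
    const int64_t loops = 1000000;
    int p2c[2], c2p[2];        // parent-to-child and child-to-parent pipes
    pipe(p2c);
    pipe(c2p);

    if (fork() == 0)           // child: echo every byte straight back
    {
        char b;
        for (int64_t i = 0; i < loops; i++)
        {
            read(p2c[0], &b, 1);
            write(c2p[1], &b, 1);
        }
        return 0;
    }

    timeval start, stop;
    gettimeofday(&start, NULL);
    char b = 'x';
    for (int64_t i = 0; i < loops; i++)
    {
        write(p2c[1], &b, 1);  // wake the child ...
        read(c2p[0], &b, 1);   // ... and block until it answers
    }
    gettimeofday(&stop, NULL);

    double usec = (stop.tv_sec - start.tv_sec) * 1e6
                + (stop.tv_usec - start.tv_usec);
    printf("pipe ping-pong: %.3f usec per round trip\n", usec / loops);
    return 0;
}
Each round trip involves at least two scheduler wake-ups, and on a dual-core machine the two processes may land on different cores, so this measures wake-up/IPC latency rather than a pure same-core context switch; I mention it only as a rough point of comparison.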