Fixed whitespaces.
git-svn-id: https://www4.informatik.uni-erlangen.de/i4svn/danceos/trunk/devel/fail@2067 8c4709b5-6ec9-48aa-a5cd-a96041d1645a
@@ -40,7 +40,7 @@ boost::mutex CommThread::m_CommMutex;

ExperimentData *JobServer::getDone()
{

#ifndef __puma
    if (m_undoneJobs.Size() == 0
     && noMoreExperiments()
@@ -49,7 +49,7 @@ ExperimentData *JobServer::getDone()
     && m_inOutCounter.getValue() == 0) {
        return 0;
    }

    ExperimentData *exp = NULL;
    exp = m_doneJobs.Dequeue();
    m_inOutCounter.decrement();
@@ -115,7 +115,7 @@ void JobServer::run()
{
    struct sockaddr_in clientaddr;
    socklen_t clen = sizeof(clientaddr);

    // implementation of server-client communication
    int s;
    if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
@@ -131,20 +131,20 @@ void JobServer::run()
        // TODO: Log-level?
        return;
    }

    /* IPv4, bind to all interfaces */
    struct sockaddr_in saddr;
    saddr.sin_family = AF_INET;
    saddr.sin_port = htons(m_port);
    saddr.sin_addr.s_addr = htons(INADDR_ANY);

    /* bind to port */
    if (::bind(s, (struct sockaddr*) &saddr, sizeof(saddr)) == -1) {
        perror("bind");
        // TODO: Log-level?
        return;
    }

    /* Listen with a backlog of maxThreads */
    if (listen(s, m_maxThreads) == -1) {
        perror("listen");
@@ -156,7 +156,7 @@ void JobServer::run()
#ifndef __puma
    boost::thread* th;
    while (!m_finish){
        // Accept connection
        int cs = accept(s, (struct sockaddr*)&clientaddr, &clen);
        if (cs == -1) {
            perror("accept");
@@ -173,7 +173,7 @@ void JobServer::run()
                m_threadlist.remove_if(timed_join_successful(m_threadtimeout));
            } while (m_threadlist.size() == m_maxThreads);
        }
        // Start new thread
        th = new boost::thread(CommThread(cs, *this));
        m_threadlist.push_back(th);
    }
@@ -243,13 +243,13 @@ void CommThread::sendPendingExperimentData(Minion& minion)
    std::deque<ExperimentData*> exp;
    ExperimentData* temp_exp = 0;
    FailControlMessage ctrlmsg;

    ctrlmsg.set_build_id(42);
    ctrlmsg.set_run_id(m_js.m_runid);
    ctrlmsg.set_command(FailControlMessage::WORK_FOLLOWS);

    for (i = 0; i < m_job_size ; i++) {
        if (m_js.m_undoneJobs.Dequeue_nb(temp_exp) == true) {
            // Got an element from queue, assign ID to workload and send to minion
            workloadID = m_js.m_counter.increment(); // increment workload counter
            temp_exp->setWorkloadID(workloadID); // store ID for identification when receiving result
@@ -258,21 +258,21 @@ void CommThread::sendPendingExperimentData(Minion& minion)
        } else {
            break;
        }

        if (!m_js.m_runningJobs.insert(workloadID, temp_exp)) {
            cout << "!![Server]could not insert workload id: [" << workloadID << "] double entry?" << endl;
        }
    }
    if (exp.size() != 0) {
        ctrlmsg.set_job_size(exp.size());

        cout << " >>[";
        for ( i = 0; i < exp.size() ; i++) {
            cout << " "<< ctrlmsg.workloadid(i) <<" ";
        }
        cout << "] " << flush;


        if (SocketComm::sendMsg(minion.getSocketDescriptor(), ctrlmsg)) {
            for (i = 0; i < ctrlmsg.job_size() ; i++) {
                if (SocketComm::sendMsg(minion.getSocketDescriptor(), exp.front()->getMessage())) {
@@ -280,7 +280,7 @@ void CommThread::sendPendingExperimentData(Minion& minion)
                } else {
                    break;
                }

            }
        }
        return;
@@ -314,7 +314,7 @@ void CommThread::sendPendingExperimentData(Minion& minion)
        if (SocketComm::sendMsg(minion.getSocketDescriptor(), ctrlmsg)) {
            SocketComm::sendMsg(minion.getSocketDescriptor(), temp_exp->getMessage());
        }
    } else if (m_js.noMoreExperiments() == false) {
        // Currently we have no workload (even the running-job-queue is empty!), but
        // the campaign is not over yet. Minion can try again later.
        ctrlmsg.set_command(FailControlMessage::COME_AGAIN);
@@ -362,7 +362,7 @@ void CommThread::receiveExperimentResults(Minion& minion, FailControlMessage& ct
            // we (may) distribute the (running) jobs to a *few* experiment-clients.
            cout << "[Server] Received another result for workload id ["
                << ctrlmsg.workloadid(i) << "] -- ignored." << endl;

            // TODO: Any need for error-handling here?
        }
    }

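Note: the accept loop in JobServer::run() above prunes finished communication threads with m_threadlist.remove_if(timed_join_successful(m_threadtimeout)). The predicate itself is not part of this diff; the following is only a minimal sketch of what such a predicate could look like, assuming it wraps boost::thread::timed_join with a millisecond timeout and also owns the cleanup of the thread object. Member and parameter names are assumptions for illustration.

#include <boost/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

// Hypothetical predicate for std::list<boost::thread*>::remove_if:
// joins threads that terminate within a bounded wait and frees them.
struct timed_join_successful {
    int m_timeout_ms; // assumed millisecond timeout, fed from m_threadtimeout above
    explicit timed_join_successful(int timeout_ms) : m_timeout_ms(timeout_ms) {}

    bool operator()(boost::thread* th) const {
        // timed_join() returns true if the thread finished within the given duration.
        if (th->timed_join(boost::posix_time::milliseconds(m_timeout_ms))) {
            delete th;   // thread object no longer needed
            return true; // remove_if drops the pointer from m_threadlist
        }
        return false;    // still running: keep it in the list
    }
};
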
@@ -18,21 +18,21 @@
namespace fail {

class CommThread;

/**
 * \class JobServer
 * The server supplies the Minions with ExperimentData's and receives the result data.
 *
 * Manages the campaigns parameter distributions. The Campaign Controller can add
 * experiment parameter sets, which the Jobserver will distribute to requesting
 * clients. The campaign controller can wait for all results, or a timeout.
 */
class JobServer {
private:
    //! The TCP Port number
    int m_port;
    //! TODO nice termination concept
    bool m_finish;
    //! Campaign signaled last expirement data set
    bool m_noMoreExps;
    //! the maximal number of threads spawned for TCP communication
@@ -43,7 +43,7 @@ private:
#ifndef __puma
    typedef std::list<boost::thread*> Tthreadlist;
    Tthreadlist m_threadlist;

    boost::thread* m_serverThread;
#endif // puma

@@ -77,13 +77,13 @@ private:
#endif
    void sendWork(int sockfd);

public:
    JobServer(int port = SERVER_COMM_TCP_PORT) : m_port(port), m_finish(false), m_noMoreExps(false),
        m_maxThreads(128), m_threadtimeout(0), m_undoneJobs(SERVER_OUT_QUEUE_SIZE)
    {
        m_runid = std::time(0);
#ifndef __puma
        m_serverThread = new boost::thread(&JobServer::run, this); // run operator()() in a thread.
#ifdef SERVER_PERFORMANCE_MEASURE
        m_measureThread = new boost::thread(&JobServer::measure, this);
#endif
@@ -123,8 +123,8 @@ public:
     * @see setNoMoreExperiments
     */
    bool noMoreExperiments() const { return m_noMoreExps; }

    /**
     * The Campaign Controller can signalize, that the jobserver can
     * stop listening for client connections.
     */
@@ -134,7 +134,7 @@ public:
/**
 * @class CommThread
 * Implementation of the communication threads.
 * This class implements the actual communication
 * with the minions.
 */
class CommThread {
@@ -154,10 +154,10 @@ private:
    /**
     * Called after minion offers a result message.
     * Evaluates the Workload ID and puts the corresponding
     * job result into the result queue.
     * @param minion The minion offering results
     * @param workloadID The workload id of the result message
     */
    void receiveExperimentResults(Minion& minion, FailControlMessage& ctrlmsg);
public:
#ifndef __puma

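Note: the JobServer class comment above describes the campaign-controller view of this interface. A minimal usage sketch of that interaction follows; it relies only on the constructor, getDone() and the setNoMoreExperiments()/noMoreExperiments() pair visible in this header, while addParam() and MyExperimentData are assumptions standing in for whatever enqueue method and ExperimentData subclass a concrete campaign uses.

void runCampaign()
{
    fail::JobServer server(SERVER_COMM_TCP_PORT); // constructor also spawns the server thread

    // Hand all parameter sets to the server; it distributes them to connecting minions.
    for (int i = 0; i < 1000; ++i) {
        MyExperimentData *param = new MyExperimentData(/* ... */); // assumed campaign-specific type
        server.addParam(param);                                    // assumed enqueue method
    }
    server.setNoMoreExperiments(); // no further parameter sets will follow

    // Collect results; per getDone() above, it returns 0 once the undone and
    // running queues have drained and no more experiments are announced.
    while (fail::ExperimentData *result = server.getDone()) {
        // ... evaluate and store the result ...
        delete result;
    }
}
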
@@ -9,6 +9,7 @@ JobClient::JobClient(const std::string& server, int port)
    m_server_port = port;
    m_server = server;
    m_server_ent = gethostbyname(m_server.c_str());
+   cout << "JobServer: " << m_server.c_str() << endl;
    if(m_server_ent == NULL) {
        perror("[Client@gethostbyname()]");
        // TODO: Log-level?
@@ -39,12 +40,12 @@ bool JobClient::connectToServer()
    /* Enable address reuse */
    int on = 1;
    setsockopt( m_sockfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on) );

    memset(&serv_addr, 0, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    memcpy(&serv_addr.sin_addr.s_addr, m_server_ent->h_addr, m_server_ent->h_length);
    serv_addr.sin_port = htons(m_server_port);

    int retries = CLIENT_RETRY_COUNT;
    while (true) {
        if (connect(m_sockfd, (sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) {
@@ -78,7 +79,7 @@ bool JobClient::getParam(ExperimentData& exp)
    while (1) { // Here we try to acquire a parameter set
        switch (tryToGetExperimentData(exp)) {
        // Jobserver will sent workload, params are set in \c exp
        case FailControlMessage::WORK_FOLLOWS:
            return true;
        // Nothing to do right now, but maybe later
        case FailControlMessage::COME_AGAIN:
@@ -92,9 +93,9 @@ bool JobClient::getParam(ExperimentData& exp)

FailControlMessage_Command JobClient::tryToGetExperimentData(ExperimentData& exp)
{

    FailControlMessage ctrlmsg;

    //Are there other jobs for the experiment
    if (m_parameters.size() == 0) {

@@ -129,7 +130,7 @@ FailControlMessage_Command JobClient::tryToGetExperimentData(ExperimentData& exp
            uint32_t i;
            for (i = 0 ; i < ctrlmsg.job_size() ; i++) {
                ExperimentData* temp_exp = new ExperimentData(exp.getMessage().New());

                if (!SocketComm::rcvMsg(m_sockfd, temp_exp->getMessage())) {
                    // Failed to receive message? Retry.
                    close(m_sockfd);
@@ -144,28 +145,28 @@ FailControlMessage_Command JobClient::tryToGetExperimentData(ExperimentData& exp
        case FailControlMessage::COME_AGAIN:
            break;
        default:
            break;
        }
        close(m_sockfd);

        //start time measurement for throughput calculation
        m_job_runtime.startTimer();
    }

    if (m_parameters.size() != 0) {
        exp.getMessage().CopyFrom(m_parameters.front()->getMessage());
        exp.setWorkloadID(m_parameters.front()->getWorkloadID());

        delete &m_parameters.front()->getMessage();
        delete m_parameters.front();
        m_parameters.pop_front();

        return FailControlMessage::WORK_FOLLOWS;
    } else {
        return ctrlmsg.command();
    }


}

bool JobClient::sendResult(ExperimentData& result)
@@ -174,28 +175,28 @@ bool JobClient::sendResult(ExperimentData& result)
    ExperimentData* temp_exp = new ExperimentData(result.getMessage().New());
    temp_exp->getMessage().CopyFrom(result.getMessage());
    temp_exp->setWorkloadID(result.getWorkloadID());

    m_results.push_back( temp_exp );

    if (m_parameters.size() != 0) {
        //If there are more jobs for the experiment store result
        return true;
    } else {
        //Stop time measurement and calculate new throughput
        m_job_runtime.stopTimer();
        m_job_throughput = CLIENT_JOB_REQUEST_SEC/((double)m_job_runtime/m_results.size());

        if (m_job_throughput > CLIENT_JOB_LIMIT) {
            m_job_throughput = CLIENT_JOB_LIMIT;
        }

        if (m_job_throughput < 1) {
            m_job_throughput = 1;
        }

        //Reset timer for new time measurement
        m_job_runtime.reset();

        return sendResultsToServer();
    }
}
@@ -213,9 +214,9 @@ bool JobClient::sendResultsToServer()
        ctrlmsg.set_build_id(42);
        ctrlmsg.set_run_id(m_server_runid);
        ctrlmsg.set_job_size(m_results.size()); //Store how many results will be sent

        cout << "[Client] Sending back result [";

        uint32_t i;
        for (i = 0; i < m_results.size() ; i++) {
            ctrlmsg.add_workloadid(m_results[i]->getWorkloadID());
@@ -223,10 +224,10 @@ bool JobClient::sendResultsToServer()
            cout << " ";
        }
        cout << "]";

        // TODO: Log-level?
        SocketComm::sendMsg(m_sockfd, ctrlmsg);

        for (i = 0; i < ctrlmsg.job_size() ; i++) {
            SocketComm::sendMsg(m_sockfd, m_results.front()->getMessage());
            delete &m_results.front()->getMessage();

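Note: the clamp in sendResult() above sizes the next job request from the measured per-job runtime. A small worked example of that formula follows; the constant values are assumptions for illustration only, the real CLIENT_JOB_REQUEST_SEC and CLIENT_JOB_LIMIT are defined elsewhere in the FAIL* configuration.

// Hypothetical numbers, mirroring the throughput computation above.
const double CLIENT_JOB_REQUEST_SEC = 25.0; // assumed: request roughly 25 s worth of jobs at a time
const int    CLIENT_JOB_LIMIT       = 1000; // assumed: upper bound on jobs per request

double runtime_sec = 10.0; // measured wall-clock time for the last batch
int    results     = 50;   // jobs completed in that batch -> 0.2 s per job

// next request size = time budget / per-job runtime = 25 / 0.2 = 125 jobs
int job_throughput = static_cast<int>(CLIENT_JOB_REQUEST_SEC / (runtime_sec / results));

if (job_throughput > CLIENT_JOB_LIMIT) job_throughput = CLIENT_JOB_LIMIT;
if (job_throughput < 1) job_throughput = 1;
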
@@ -18,7 +18,7 @@ namespace fail {

/**
 * \class JobClient
 *
 * \brief Manages communication with JobServer
 * The Minion's JobClient requests ExperimentData and returns results.
 */
@@ -29,16 +29,16 @@ private:
    struct hostent* m_server_ent;
    int m_sockfd;
    uint64_t m_server_runid;

    WallclockTimer m_job_runtime;
    int m_job_throughput;
    std::deque<ExperimentData*> m_parameters;
    std::deque<ExperimentData*> m_results;

    bool connectToServer();
    bool sendResultsToServer();
    FailControlMessage_Command tryToGetExperimentData(ExperimentData& exp);

public:
    JobClient(const std::string& server = SERVER_COMM_HOSTNAME, int port = SERVER_COMM_TCP_PORT);
    ~JobClient();
@@ -46,7 +46,7 @@ public:
     * Receive experiment data set from (remote) JobServer
     * The caller (experiment developer) is responsible for
     * allocating his ExperimentData object.
     *
     * @param exp Reference to a ExperimentData object allocated by the caller!
     * @return \c true if parameter have been received and put into \c exp, \c false else.
     */
@@ -55,14 +55,14 @@ public:
     * Send back experiment result to the (remote) JobServer
     * The caller (experiment developer) is responsible for
     * destroying his ExperimentData object afterwards.
     *
     * @param result Reference to the ExperimentData holding result values
     * @return \c true Result successfully sent, \c false else.
     */
    bool sendResult(ExperimentData& result);
    /**
     * Return the number of undone jobs that have already been fetched from the server.
     *
     * @return the number of undone jobs.
     */
    int getNumberOfUndoneJobs() { return m_parameters.size(); }

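Note: the getParam()/sendResult() documentation above implies the usual minion-side loop. A minimal sketch of that loop follows, using only the interface declared in this header; MyExperimentData stands in for a campaign-specific ExperimentData subclass and is not part of this diff.

void runMinion()
{
    fail::JobClient client(SERVER_COMM_HOSTNAME, SERVER_COMM_TCP_PORT);

    MyExperimentData param;          // assumed wrapper around the campaign's protobuf message
    while (client.getParam(param)) { // false once the campaign is over (or the server is unreachable)
        // ... inject the fault described by param and run the experiment ...
        // ... write the outcome into param's result fields ...
        client.sendResult(param);    // results are batched; the JobClient flushes them to the server
    }
}
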
@@ -26,7 +26,7 @@ bool TroubleListener::isMatching(const TroubleEvent* pEv) const
    return false;
}

bool TroubleListener::removeWatchNumber(unsigned troubleNum)
{
    for (unsigned i = 0; i < m_WatchNumbers.size(); i++) {
        if (m_WatchNumbers[i] == troubleNum) {
@@ -37,12 +37,12 @@ bool TroubleListener::removeWatchNumber(unsigned troubleNum)
    return false;
}

bool TroubleListener::addWatchNumber(unsigned troubleNumber)
{
    for (unsigned i = 0; i < m_WatchNumbers.size(); i++) {
        if (m_WatchNumbers[i] == troubleNumber)
            return false;
    }
    m_WatchNumbers.push_back(troubleNumber);
    return true;
}

@@ -26,7 +26,7 @@ void ElfReader::setup(const char* path) {
    fseek(fp,(off_t)0,SEEK_SET);
    read_ELF_file_header(fp, &ehdr);
    num_hdrs=ehdr.e_shnum;
    m_log << "Evaluating ELF File: " << path << std::endl;
    // Parse symbol table and generate internal map
    for(i=0;i<num_hdrs;i++)
    {
@@ -39,7 +39,6 @@ void ElfReader::setup(const char* path) {
        if((sec_hdr.sh_type==SHT_SYMTAB)||(sec_hdr.sh_type==SHT_DYNSYM))
        {
            process_symboltable(i,fp);

        }
        else
        {
@@ -88,14 +87,14 @@ int ElfReader::process_symboltable(int sect_num, FILE* fp){
    {
        return -1;
    }
    //get the size of strtab in file and allocate a buffer
    name_buf=(char*)malloc(sect_hdr.sh_size);
    if(!name_buf)
        return -1;
    //get the offset of strtab in file and seek to it
    fseek(fp,sect_hdr.sh_offset,SEEK_SET);
    //read all data from the section to the buffer.
    fread(name_buf,sect_hdr.sh_size,1,fp);
    //so we have the namebuf now seek to symtab data
    fseek(fp,sym_data_offset,SEEK_SET);

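Note: after the seek to sym_data_offset above, the rest of process_symboltable (not shown in this hunk) presumably walks the symbol entries and resolves their names via the string-table buffer. The following is only a hedged sketch of such a loop, using the standard <elf.h> Elf32_Sym layout; how ElfReader actually stores the name-to-address mapping is an assumption.

#include <elf.h>
#include <cstdio>

// Sketch: read `count` 32-bit symbol entries from fp (already positioned at the
// symbol data) and resolve their names from the previously loaded strtab buffer.
static void read_symbols(FILE *fp, char *name_buf, size_t count)
{
    Elf32_Sym sym;
    for (size_t n = 0; n < count; ++n) {
        if (fread(&sym, sizeof(sym), 1, fp) != 1)
            break;                                  // truncated symbol table
        const char *name = name_buf + sym.st_name;  // st_name is an offset into strtab
        if (*name == '\0')
            continue;                               // unnamed symbol (e.g. section symbol)
        // Insertion into ElfReader's internal map would happen here, e.g.:
        // addSymbol(name, sym.st_value, sym.st_size); // assumed helper
    }
}
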
@@ -39,7 +39,7 @@ namespace fail {
     * Both mangled an demangled symbols are searched.
     * @param name The symbol name as string
     * @return The according address if found, else ADDR_INV
     */
    guest_address_t getAddressByName(const std::string& name) ;

    /**

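Note: a short usage sketch for getAddressByName() as documented above; how the ElfReader instance is initialized (default construction plus the setup() call from the earlier hunk) and the example symbol and path are assumptions.

void lookupExample()
{
    fail::ElfReader reader;                     // assumed default constructor
    reader.setup("/path/to/guest-binary.elf");  // setup() as in the .cc hunk above; path is hypothetical
    fail::guest_address_t addr = reader.getAddressByName("main");
    if (addr == ADDR_INV) {                     // ADDR_INV per the documented return value
        // symbol "main" was found neither among the mangled nor the demangled names
    }
}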