cosmetics

Change-Id: Ifae805ae1e2dac95324e054af09a7b70f5d5b60c
Author: Horst Schirmeier
Date: 2013-04-22 14:24:02 +02:00
Parent: 2d45a2c52c
Commit: 0f16f18d75
19 changed files with 101 additions and 103 deletions

View File

@@ -39,7 +39,7 @@ fi
for h in $FAIL_DISTRIBUTE_HOSTS
do
echo Distributing to $h ...
rsync -az --partial --delete-before --delete-excluded --exclude=core --exclude=trace.tc . $h:"$FAIL_EXPERIMENT_TARGETDIR" &
rsync -az --partial --delete-before --delete-excluded --exclude=core --exclude=*.tc . $h:"$FAIL_EXPERIMENT_TARGETDIR" &
done
wait

View File

@@ -14,7 +14,7 @@ SCRIPTDIR=$(readlink -f $(dirname $0))
# env variable defaults
source $SCRIPTDIR/fail-env.sh
CMD="killall -q client.sh"
CMD="killall -q client.sh; killall -q fail-client"
CONNECTION_ATTEMPTS=2
SSH="ssh -o BatchMode=yes -o ConnectTimeout=60 -o ConnectionAttempts=$CONNECTION_ATTEMPTS"
@@ -31,3 +31,6 @@ do
$SSH $h "$CMD $NCLIENTS" &
done
wait
echo "Done."

View File

@@ -40,7 +40,6 @@ boost::mutex CommThread::m_CommMutex;
ExperimentData *JobServer::getDone()
{
#ifndef __puma
if (m_undoneJobs.Size() == 0
&& noMoreExperiments()
@@ -151,7 +150,7 @@ void JobServer::run()
// TODO: Log-level?
return;
}
cout << "JobServer listening...." << endl;
cout << "JobServer listening ..." << endl;
// TODO: Log-level?
#ifndef __puma
boost::thread* th;
@@ -266,12 +265,8 @@ void CommThread::sendPendingExperimentData(Minion& minion)
if (exp.size() != 0) {
ctrlmsg.set_job_size(exp.size());
cout << " >>[";
for ( i = 0; i < exp.size() ; i++) {
cout << " "<< ctrlmsg.workloadid(i) <<" ";
}
cout << "] " << flush;
cout << " >>[" << ctrlmsg.workloadid(0) << "+"
<< exp.size() << "] \r" << flush;
if (SocketComm::sendMsg(minion.getSocketDescriptor(), ctrlmsg)) {
for (i = 0; i < ctrlmsg.job_size(); i++) {
@@ -310,7 +305,7 @@ void CommThread::sendPendingExperimentData(Minion& minion)
ctrlmsg.add_workloadid(workloadID); // set workload id
ctrlmsg.set_job_size(1); // In 2nd priority the jobserver send only one job
//cout << ">>[Server] Re-sending workload [" << workloadID << "]" << endl;
cout << ">>R[" << workloadID << "] " << flush;
cout << ">>R[" << workloadID << "] \r" << flush;
if (SocketComm::sendMsg(minion.getSocketDescriptor(), ctrlmsg)) {
SocketComm::sendMsg(minion.getSocketDescriptor(), temp_exp->getMessage());
}
@@ -332,11 +327,10 @@ void CommThread::receiveExperimentResults(Minion& minion, FailControlMessage& ct
{
int i;
ExperimentData* exp = NULL; // Get exp* from running jobs
cout << " <<[ ";
for (i = 0; i < ctrlmsg.workloadid_size(); i++) {
cout << ctrlmsg.workloadid(i) << " ";
if (ctrlmsg.workloadid_size() > 0) {
cout << " <<[" << ctrlmsg.workloadid(0) << "+"
<< ctrlmsg.workloadid_size() << "] \r" << flush;
}
cout << "] " << flush;
#ifndef __puma
// Prevent re-sending jobs in sendPendingExperimentData:
// a) sendPendingExperimentData needs an intact job to serialize and send it.

View File

@@ -112,21 +112,21 @@ public:
*/
ExperimentData* getDone();
/**
* The Campaign controller must signalize, that there will be no
* more parameter sets. We need this, as we allow concurrent parameter
* generation and distribution.
* The Campaign controller must signal that there will be no more parameter
* sets. We need this, as we allow concurrent parameter generation and
* distribution.
*/
void setNoMoreExperiments() { m_noMoreExps = true; }
/**
* Checks whether there are no more experiment paremeter sets.
* Checks whether there are no more experiment parameter sets.
* @return \c true if no more parameter sets available, \c false otherwise
* @see setNoMoreExperiments
*/
bool noMoreExperiments() const { return m_noMoreExps; }
/**
* The Campaign Controller can signalize, that the jobserver can
* stop listening for client connections.
* The Campaign Controller may signal that the jobserver can stop listening
* for client connections.
*/
void done() { m_finish = true; }
};
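The doc comments above describe the hand-off between a campaign controller and the JobServer: parameter sets are generated and distributed concurrently, setNoMoreExperiments() marks the end of generation, getDone() hands back finished results, and done() finally lets the server stop accepting client connections. The following is only a sketch of that driving loop; getDone(), setNoMoreExperiments() and done() come from the header shown here, while the include path, addParam(), createParameterSet() and evaluate() are assumed names for the enqueue and result-handling side.

#include "JobServer.hpp"                        // assumed include path
ExperimentData *createParameterSet(int i);      // hypothetical factory
void evaluate(ExperimentData *result);          // hypothetical result handling

void runCampaign(JobServer& server)
{
	// Phase 1: enqueue parameter sets; distribution to clients runs concurrently.
	for (int i = 0; i < 1000; ++i) {
		ExperimentData *param = createParameterSet(i);
		server.addParam(param);                 // hypothetical enqueue call
	}
	// No further parameter sets will follow, so getDone() can eventually
	// report completion instead of blocking forever.
	server.setNoMoreExperiments();

	// Phase 2: collect finished experiments until none are left.
	while (ExperimentData *result = server.getDone()) {
		evaluate(result);
		delete result;
	}
	// The jobserver may now stop listening for client connections.
	server.done();
}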

View File

@@ -188,9 +188,7 @@ bool JobClient::sendResult(ExperimentData& result)
if (m_job_throughput > CLIENT_JOB_LIMIT) {
m_job_throughput = CLIENT_JOB_LIMIT;
}
if (m_job_throughput < 1) {
} else if (m_job_throughput < 1) {
m_job_throughput = 1;
}

View File

@@ -115,6 +115,8 @@ public:
* Fire done: Callback from Simulator
*/
void fireInterruptDone();
virtual simtime_t getTimerTicks() { return bx_pc_system.time_ticks(); }
virtual simtime_t getTimerTicksPerSecond() { return bx_pc_system.time_ticks() / bx_pc_system.time_usec(); /* imprecise hack */ }
/* ********************************************************************
* BochsController-specific (not implemented in SimulatorController!):
* ********************************************************************/
@@ -150,8 +152,6 @@ public:
* @see The uses SimulatorController::getCPU().
*/
ConcreteCPU& detectCPU(BX_CPU_C* pCPU) const;
virtual simtime_t getTimerTicks() { return bx_pc_system.time_ticks(); }
virtual simtime_t getTimerTicksPerSecond() { return bx_pc_system.time_ticks() / bx_pc_system.time_usec(); /* imprecise hack */ }
};
} // end-of-namespace: fail

View File

@@ -251,7 +251,7 @@ int DatabaseProtobufAdapter::TypeBridge_message::gatherTypes(StringJoiner &inser
bool can_be_repeated = true; // default value
// For repeated messages
TypeBridge_message *top_level_msg;
TypeBridge_message *top_level_msg = 0;
const FieldOptions& field_options = field->options();
if (field_options.GetExtension(sql_ignore)) {
@@ -411,9 +411,7 @@ int DatabaseProtobufAdapter::field_size_at_pos(const Message *msg, std::vector<i
}
bool DatabaseProtobufAdapter::insert_row(const google::protobuf::Message *msg) {
const Reflection *ref = msg->GetReflection();
const Descriptor *d = msg->GetDescriptor();
assert (d != 0 && ref != 0);
assert (msg->GetDescriptor() != 0 && msg->GetReflection() != 0);
MYSQL_BIND *bind = new MYSQL_BIND[top_level_msg.field_count];

View File

@@ -28,7 +28,10 @@ private:
int nextpick;
// We need a window at least as wide as the number of clients we serve.
enum { pick_window_size = 2000 };
// FIXME better solution: when inbound queue is empty, *copy* in-flight map
// to a vector, iterate but don't delete; when at the end, copy in-flight
// map again and repeat
enum { pick_window_size = 50000 };
public:
SynchronizedMap() : nextpick(0) { }
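The FIXME added above sketches a different re-send strategy: rather than bounding the pick window, copy the in-flight map into a vector once the inbound queue runs dry, iterate over the copy without deleting anything, and take a fresh copy when the end is reached. A rough, self-contained illustration of that idea follows; it is not part of this commit, and ResendPicker, m_inflight and m_snapshot are made-up names.

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

// Sketch of the FIXME's proposal: cycle over snapshots of the in-flight jobs.
template <typename Key, typename Value>
class ResendPicker {
	std::map<Key, Value> m_inflight;                 // jobs currently sent to clients
	std::vector<std::pair<Key, Value> > m_snapshot;  // copy we iterate over
	size_t m_pos;
public:
	ResendPicker() : m_pos(0) { }
	// Returns the next in-flight job to re-send, or NULL if nothing is in flight.
	const std::pair<Key, Value>* pickNext()
	{
		if (m_pos >= m_snapshot.size()) {
			// End of the copy (or first call): snapshot the map again and restart.
			m_snapshot.assign(m_inflight.begin(), m_inflight.end());
			m_pos = 0;
			if (m_snapshot.empty()) {
				return 0;
			}
		}
		return &m_snapshot[m_pos++];
	}
};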

View File

@@ -68,11 +68,13 @@ public:
// Lock is automatically released in the wait and obtained
// again after the wait
#ifndef __puma
while (m_queue.size() == 0)
while (m_queue.size() == 0) {
m_cond.wait(lock);
}
#endif
// Retrieve the data from the queue
T result=m_queue.front(); m_queue.pop();
T result = m_queue.front();
m_queue.pop();
// Notify others that we have free slots
#ifndef __puma
@@ -101,7 +103,8 @@ public:
// again after the wait
if (m_queue.size() > 0) {
// Retrieve the data from the queue
d = m_queue.front(); m_queue.pop();
d = m_queue.front();
m_queue.pop();
// Notify others that we have free slots
#ifndef __puma
if (m_queue.size() < capacity) {

View File

@@ -1 +1 @@
TracePlugin_pb2.py{,c}
TracePlugin_pb2.py

View File

@@ -14,13 +14,13 @@ using std::cerr;
using std::hex;
using std::dec;
Logger log("dump-trace", true);
Logger LOG("dump-trace", true);
std::istream& openStream(const char *input_file,
std::ifstream& normal_stream, igzstream& gz_stream) {
normal_stream.open(input_file);
if (!normal_stream) {
log << "couldn't open " << input_file << endl;
LOG << "couldn't open " << input_file << endl;
exit(-1);
}
unsigned char b1, b2;
@@ -30,16 +30,16 @@ std::istream& openStream(const char *input_file,
normal_stream.close();
gz_stream.open(input_file);
if (!gz_stream) {
log << "couldn't open " << input_file << endl;
LOG << "couldn't open " << input_file << endl;
exit(-1);
}
//log << "opened file " << input_file << " in GZip mode" << endl;
//LOG << "opened file " << input_file << " in GZip mode" << endl;
return gz_stream;
}
normal_stream.seekg(0);
//log << "opened file " << input_file << " in normal mode" << endl;
//LOG << "opened file " << input_file << " in normal mode" << endl;
return normal_stream;
}
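For context, openStream() decides between the plain ifstream and the igzstream by peeking at the first two bytes of the file (b1, b2 above); the comparison itself lies in lines elided from this hunk, but a gzip stream always begins with the magic bytes 0x1f 0x8b. A minimal stand-alone version of such a check (assumed, not copied from the file) could look like this:

// Assumed helper illustrating the gzip sniffing done in openStream():
// every gzip file starts with the two magic bytes 0x1f 0x8b.
static bool looks_like_gzip(unsigned char b1, unsigned char b2)
{
	return b1 == 0x1f && b2 == 0x8b;
}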

View File

@@ -4,7 +4,6 @@ set(SRCS
DCiAOKernelImporter.cc
)
## This is the example's campaign server distributing experiment parameters
add_executable(import-trace main.cc ${SRCS} ${PROTO_SRCS} ${PROTO_HDRS})
add_executable(import-trace main.cc ${SRCS})
target_link_libraries(import-trace ${PROTOBUF_LIBRARY} -lmysqlclient fail-util fail-sal fail-comm)
install(TARGETS import-trace RUNTIME DESTINATION bin)

View File

@@ -27,7 +27,7 @@ public:
virtual bool add_trace_event(instruction_count_t begin, instruction_count_t end,
const Trace_Event &event, bool is_fake = false) = 0;
void set_elf_file(fail::ElfReader *elf) {m_elf = elf;};
void set_elf_file(fail::ElfReader *elf) { m_elf = elf; }
protected:
private:
typedef std::map<fail::address_t, instruction_count_t> AddrLastaccessMap;

View File

@@ -55,7 +55,7 @@ int main(int argc, char *argv[]) {
for (int i = 1; i < argc; ++i)
cmd.add_args(argv[i]);
CommandLine::option_handle IGNORE = cmd.addOption("", "", Arg::None, "USAGE: import-trace [options]");
cmd.addOption("", "", Arg::None, "USAGE: import-trace [options]");
CommandLine::option_handle HELP = cmd.addOption("h", "help", Arg::None, "-h/--help\t Print usage and exit");
CommandLine::option_handle TRACE_FILE = cmd.addOption("t", "trace-file", Arg::Required,
"-t/--trace-file\t File to load the execution trace from\n");

View File

@@ -20,7 +20,7 @@ int main(int argc, char *argv[]) {
for (int i = 1; i < argc; ++i)
cmd.add_args(argv[i]);
CommandLine::option_handle IGNORE = cmd.addOption("", "", Arg::None, "USAGE: import-trace [options]");
cmd.addOption("", "", Arg::None, "USAGE: import-trace [options]");
CommandLine::option_handle HELP = cmd.addOption("h", "help", Arg::None, "-h,--help \tPrint usage and exit");
Database::cmdline_setup();