!1325 Fix MD printing too many MS_LOG(INFO) messages

Merge pull request !1325 from xiefangqi/xfq_fix_MSINFO
pull/1325/MERGE
mindspore-ci-bot committed 5 years ago via Gitee
commit d6793ca9d4
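
The diff below uniformly demotes per-row and per-buffer pipeline chatter (connector creation, buffer pushes, EOE/EOF handling, worker load messages) from MS_LOG(INFO) to MS_LOG(DEBUG), and promotes a few recoverable data problems (skipped unsupported files in CelebA/ImageFolder, a rank override in ColDescriptor, an unreadable MNIST directory) from INFO to WARNING. For anyone who still wants to see the demoted messages, here is a minimal Python sketch. It assumes the glog-style GLOG_v environment variable that MindSpore's logger reads; the variable, its level values, and the GeneratorDataset usage are illustrative assumptions, not part of this PR.

import os

# Assumption (not part of this PR): MindSpore's MS_LOG verbosity follows the
# glog-style GLOG_v variable (0=DEBUG, 1=INFO, 2=WARNING (default), 3=ERROR),
# and it must be set before mindspore is imported.
os.environ["GLOG_v"] = "0"

import numpy as np
import mindspore.dataset as ds

def gen():
    for i in range(3):
        yield (np.array([i], dtype=np.int32),)

# Any pipeline built from here on emits the messages this PR demoted to DEBUG
# (connector creation, buffer pushes, EOE/EOF flow), while the newly promoted
# WARNING messages (e.g. skipped unsupported files) stay visible by default.
dataset = ds.GeneratorDataset(gen, column_names=["col"]).repeat(2)
for _ in dataset.create_tuple_iterator():
    pass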

@ -136,7 +136,7 @@ void DEPipeline::PrintTree() {
for (auto itr = tree_->begin(); itr != tree_->end(); ++itr) {
std::stringstream ss;
ss << *itr;
MS_LOG(INFO) << "Operator ID is " << itr->id() << ". Details: " << ss.str().c_str() << ".";
MS_LOG(DEBUG) << "Operator ID is " << itr->id() << ". Details: " << ss.str().c_str() << ".";
}
}

@ -502,7 +502,7 @@ Status Tensor::InsertTensor(const std::vector<dsize_t> &ind, const std::shared_p
err_msg += (!StartAddrOfIndex(ind, &start_addr_of_ind, &remaining_shape).IsOk()) ? "[Tensor] incorrect index\n" : "";
err_msg += !(remaining_shape == tensor->shape()) ? "[Tensor] memory error\n" : "";
if (!err_msg.empty()) {
MS_LOG(INFO) << "Insert tensor message: " << err_msg;
MS_LOG(DEBUG) << "Insert tensor message: " << err_msg;
RETURN_STATUS_UNEXPECTED(err_msg);
} else {
if (start_addr_of_ind != nullptr) {
@ -512,7 +512,7 @@ Status Tensor::InsertTensor(const std::vector<dsize_t> &ind, const std::shared_p
return Status::OK();
} else {
err_msg += "[Tensor] error in memcpy_s when inserting tensor\n";
MS_LOG(INFO) << "Tensor message: " << err_msg;
MS_LOG(DEBUG) << "Tensor message: " << err_msg;
RETURN_STATUS_UNEXPECTED(err_msg);
}
} else {

@ -73,7 +73,7 @@ class Connector {
// @param queue_capacity The number of elements (DataBuffer) for each queue.
Connector(int32_t n_producers, int32_t n_consumers, int32_t queue_capacity)
: num_producers_(n_producers), num_consumers_(n_consumers) {
MS_LOG(INFO) << "A connector is created with " << n_producers << " producers and " << n_consumers << " consumers.";
MS_LOG(DEBUG) << "A connector is created with " << n_producers << " producers and " << n_consumers << " consumers.";
my_name_ = Services::GetUniqueID();
// We require the consumers to have ids sequentially from 0 to num_consumers_-1,
// Otherwise an ordered list of consumer ids has to be passed here. (not implemented yet)
@ -138,7 +138,7 @@ class Connector {
}
expect_consumer_ = 0;
pop_from_ = 0;
MS_LOG(INFO) << "Connector counters reset.";
MS_LOG(DEBUG) << "Connector counters reset.";
}
void Print(std::ostream &out, bool showAll) const {

@ -66,8 +66,8 @@ ColDescriptor::ColDescriptor(const std::string &col_name, DataType col_type, Ten
// the input rank
if (in_shape != nullptr && in_shape->known() && in_shape->Size() != rank_) {
rank_ = in_shape->Size();
MS_LOG(INFO) << "Rank does not match the number of dimensions in the provided shape."
<< " Overriding rank with the number of dimensions in the provided shape.";
MS_LOG(WARNING) << "Rank does not match the number of dimensions in the provided shape."
<< " Overriding rank with the number of dimensions in the provided shape.";
}
}

@ -110,7 +110,7 @@ Status DatasetIterator::FetchNextTensorRow(TensorRow *out_row) {
// An eoe buffer will be immediately followed by an eof buffer, which signals the shutdown of
// all operators.
if (curr_buffer_->eoe()) {
MS_LOG(INFO) << "End of data iteration. Fetch eof and then return empty row.";
MS_LOG(DEBUG) << "End of data iteration. Fetch eof and then return empty row.";
// Before returning the last empty vector, fetch the eof buffer which should be the last
// buffer, and then free it.
@ -199,13 +199,13 @@ Status ChildIterator::FetchNextTensorRow(TensorRow *out_row) {
// Instead, if an eoe is picked up here, we simply return an empty vector and it's up to the
// caller to decide what it wants to do next.
if (curr_buffer_->eoe()) {
MS_LOG(INFO) << "Child iterator picked up EOE.";
MS_LOG(DEBUG) << "Child iterator picked up EOE.";
end_epoch_ = true;
return Status::OK();
}
if (curr_buffer_->eof()) {
MS_LOG(INFO) << "Child iterator picked up EOF.";
MS_LOG(DEBUG) << "Child iterator picked up EOF.";
eof_handled_ = true;
return Status::OK();
}
@ -226,10 +226,10 @@ Status ChildIterator::Drain() {
// - drain (will not actually drain because you are already at the end of the iteration)
// However, the next time after that, it will perform its normal draining activities.
end_epoch_ = false;
MS_LOG(INFO) << "No operation drain, already at end of epoch.";
MS_LOG(DEBUG) << "No operation drain, already at end of epoch.";
return Status::OK();
}
MS_LOG(INFO) << "Child draining buffers until eoe.";
MS_LOG(DEBUG) << "Child draining buffers until eoe.";
// else we drain until eoe or eof, eof here is for sanity check
while (!curr_buffer_->eoe() && !curr_buffer_->eof()) {
RETURN_IF_NOT_OK(current_op_->GetNextInput(&curr_buffer_, worker_id_, child_idx_));

@ -79,15 +79,15 @@ std::shared_ptr<DatasetOp> DatasetOp::child(int32_t child_index) const {
// Creates the connector within this operator
void DatasetOp::CreateConnector(int32_t num_producers, int32_t num_consumers) {
MS_LOG(INFO) << "Creating connector in tree operator: " << operator_id_ << ". Producer: " << num_producers
<< ". Consumer: " << num_consumers << ".";
MS_LOG(DEBUG) << "Creating connector in tree operator: " << operator_id_ << ". Producer: " << num_producers
<< ". Consumer: " << num_consumers << ".";
if (oc_queue_size_ > 0) {
out_connector_ = std::make_unique<DbConnector>(num_producers, // The number of producers
num_consumers, // Only one consumer (the training App)
oc_queue_size_);
} else {
// Some ops may choose not to have an output connector
MS_LOG(INFO) << "Bypassed connector creation for tree operator: " << operator_id_ << ".";
MS_LOG(DEBUG) << "Bypassed connector creation for tree operator: " << operator_id_ << ".";
out_connector_ = nullptr;
}
}
@ -246,7 +246,7 @@ Status DatasetOp::AssignColMapFromChild() {
RETURN_STATUS_UNEXPECTED("Child column name map cannot be empty!");
}
}
MS_LOG(INFO) << "Setting column map after first fetch:\n" << DatasetOp::ColumnNameMapAsString();
MS_LOG(DEBUG) << "Setting column map after first fetch:\n" << DatasetOp::ColumnNameMapAsString();
}
return Status::OK();
}

@ -313,7 +313,7 @@ Status MapOp::WorkerEntryInit(const DataBuffer *in_buf) {
}
} // mutex lock will release here
MS_LOG(INFO) << "Column name map for map op set: " << this->ColumnNameMapAsString();
MS_LOG(DEBUG) << "Column name map for map op set: " << this->ColumnNameMapAsString();
return Status::OK();
}

@ -120,10 +120,10 @@ Status ProjectOp::operator()() { RETURN_STATUS_UNEXPECTED("Logic error. ProjectO
int32_t ProjectOp::num_consumers() const {
if (parent_.empty()) {
MS_LOG(INFO) << "Project operator, no parent node, assuming it's the root and returning 1.";
MS_LOG(DEBUG) << "Project operator, no parent node, assuming it's the root and returning 1.";
return 1;
} else if (parent_[0] == nullptr) {
MS_LOG(INFO) << "Project operator, pointer to the first parent is null. Returning 0.";
MS_LOG(DEBUG) << "Project operator, pointer to the first parent is null. Returning 0.";
return 0;
} else {
return parent_[0]->num_consumers();
@ -132,7 +132,7 @@ int32_t ProjectOp::num_consumers() const {
int32_t ProjectOp::num_producers() const {
if (child_.empty() || child_[0] == nullptr) {
MS_LOG(INFO) << "Project operator, pointer to child node is null. Returning 0.";
MS_LOG(DEBUG) << "Project operator, pointer to child node is null. Returning 0.";
return 0;
} else {
return child_[0]->num_producers();

@ -84,13 +84,13 @@ Status RenameOp::operator()() {
} // end of while eoe loop
// we got eoe, now try again until we get eof
MS_LOG(INFO) << "Rename operator EOE Received.";
MS_LOG(DEBUG) << "Rename operator EOE Received.";
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE))));
MS_LOG(DEBUG) << "Rename operator fetching buffer after EOE.";
RETURN_IF_NOT_OK(GetNextInput(&curr_buffer));
} // end of while eof loop
MS_LOG(INFO) << "Rename opeerator EOF Received.";
MS_LOG(DEBUG) << "Rename opeerator EOF Received.";
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
return Status::OK();
}
@ -116,18 +116,18 @@ Status RenameOp::RenameColumns() {
// found
found += 1;
int index = std::distance(in_columns_.begin(), it);
MS_LOG(INFO) << "Rename operator index found " << index << " value " << id << ".";
MS_LOG(DEBUG) << "Rename operator index found " << index << " value " << id << ".";
new_col_name_id_map[out_columns_[index]] = id;
} else {
// not found
MS_LOG(INFO) << "Rename operator index not found: " << id << " is the column id.";
MS_LOG(DEBUG) << "Rename operator index not found: " << id << " is the column id.";
new_col_name_id_map[name] = id;
}
}
// only checks that the renamed columns have been found; this input check doesn't check everything
if (found != in_columns_.size()) {
MS_LOG(INFO) << "Rename operator column names found: " << found << " out of " << in_columns_.size() << ".";
MS_LOG(DEBUG) << "Rename operator column names found: " << found << " out of " << in_columns_.size() << ".";
std::string err_msg = "Renamed column doesn't exist in dataset";
RETURN_STATUS_UNEXPECTED(err_msg);
}
@ -163,7 +163,7 @@ void RenameOp::Print(std::ostream &out, // In: The output stream to print t
}
Status RenameOp::EofReceived(int32_t) {
MS_LOG(INFO) << "Rename operator EOF received, do nothing now.";
MS_LOG(DEBUG) << "Rename operator EOF received, do nothing now.";
return Status::OK();
}

@ -132,7 +132,7 @@ Status RepeatOp::GetNextBuffer(std::unique_ptr<DataBuffer> *p_buffer, int32_t wo
// Base-class override for handling cases when an eoe is received.
Status RepeatOp::EoeReceived(int32_t worker_id) {
repeat_count_++;
MS_LOG(INFO) << "Repeat operator end of epoch message received. Repeat count is now: " << repeat_count_ << ".";
MS_LOG(DEBUG) << "Repeat operator end of epoch message received. Repeat count is now: " << repeat_count_ << ".";
bool repeated = BitTest(op_ctrl_flags_, kDeOpRepeated);
bool last_repeat = BitTest(op_ctrl_flags_, kDeOpLastRepeat);
// If we've reached the requested repeat count, then flag the eoe nodes
@ -164,16 +164,16 @@ Status RepeatOp::operator()() { RETURN_STATUS_UNEXPECTED("Logic error. RepeatOp
// Base-class override for handling cases when an eof is received.
Status RepeatOp::EofReceived(int32_t worker_id) {
MS_LOG(INFO) << "Repeat operator EOF received, do nothing now.";
MS_LOG(DEBUG) << "Repeat operator EOF received, do nothing now.";
return Status::OK();
}
int32_t RepeatOp::num_consumers() const {
if (parent_.empty()) {
MS_LOG(INFO) << "Repeat operator, no parent node, assuming it's root and returning 1.";
MS_LOG(DEBUG) << "Repeat operator, no parent node, assuming it's root and returning 1.";
return 1;
} else if (parent_[0] == nullptr) {
MS_LOG(INFO) << "Repeat operator, pointer to the first parent is null. Returning 0.";
MS_LOG(DEBUG) << "Repeat operator, pointer to the first parent is null. Returning 0.";
return 0;
} else {
return parent_[0]->num_consumers();
@ -182,7 +182,7 @@ int32_t RepeatOp::num_consumers() const {
int32_t RepeatOp::num_producers() const {
if (child_.empty() || child_[0] == nullptr) {
MS_LOG(INFO) << "Repeat operator, pointer to child node is null. Returning 0.";
MS_LOG(DEBUG) << "Repeat operator, pointer to child node is null. Returning 0.";
return 0;
} else {
return child_[0]->num_producers();

@ -82,7 +82,7 @@ ShuffleOp::ShuffleOp(int32_t shuffle_size, uint32_t shuffle_seed, int32_t op_con
// Private function to re-init the shuffle op for another epoch. Shuffle op calls this by
// itself rather than waiting for the reset driven from operators above it in the pipeline.
Status ShuffleOp::SelfReset() {
MS_LOG(INFO) << "Shuffle operator performing a self-reset.";
MS_LOG(DEBUG) << "Shuffle operator performing a self-reset.";
// If ReshuffleEachEpoch is false, then we always use the same seed for every
// epoch.
// If ReshuffleEachEpoch is true, then the first epoch uses the given seed,
@ -224,7 +224,7 @@ Status ShuffleOp::operator()() {
// Since we overloaded the eoeReceived function, we are responsible for flowing the EOE up the
// pipeline manually now that we are done draining the shuffle buffer
MS_LOG(INFO) << "Shuffle operator sending EOE.";
MS_LOG(DEBUG) << "Shuffle operator sending EOE.";
auto eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
@ -239,7 +239,7 @@ Status ShuffleOp::operator()() {
// Private function to populate the shuffle buffer initially by fetching from the child output
// connector until the shuffle buffer is full (or there is no more data coming).
Status ShuffleOp::InitShuffleBuffer() {
MS_LOG(INFO) << "Shuffle operator initializing the shuffle buffer.";
MS_LOG(DEBUG) << "Shuffle operator initializing the shuffle buffer.";
// The first phase of this operator is to read incoming buffers and then drain those
// rows from the buffers, putting them into our own local table of tensors (the shuffle
@ -258,7 +258,7 @@ Status ShuffleOp::InitShuffleBuffer() {
RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row));
if (child_iterator_->eof_handled()) {
MS_LOG(INFO) << "Shuffle operator init picked up EOF. No more epochs.";
MS_LOG(DEBUG) << "Shuffle operator init picked up EOF. No more epochs.";
return Status::OK();
}
@ -289,7 +289,7 @@ Status ShuffleOp::InitShuffleBuffer() {
shuffle_buffer_state_ = kShuffleStateDrain;
}
MS_LOG(INFO) << "Shuffle operator finished intializing the shuffle buffer.";
MS_LOG(DEBUG) << "Shuffle operator finished intializing the shuffle buffer.";
return Status::OK();
}

@ -216,8 +216,8 @@ Status CelebAOp::ParseImageAttrInfo() {
Path path(folder_path_);
Path file_path = path / split[0];
if (!extensions_.empty() && extensions_.find(file_path.Extension()) == extensions_.end()) {
MS_LOG(INFO) << "Unsupported file found at " << file_path.toString().c_str() << ", its extension is "
<< file_path.Extension().c_str() << ".";
MS_LOG(WARNING) << "Unsupported file found at " << file_path.toString().c_str() << ", its extension is "
<< file_path.Extension().c_str() << ".";
continue;
}
image_labels.first = split[0];

@ -33,7 +33,7 @@ GeneratorOp::Builder::Builder() {
Status GeneratorOp::Builder::SanityCheck() {
// Update queue size to fit the prefetch requirement
MS_LOG(INFO) << "Generator operator sanity check, prefetch size is " << build_prefetch_size_ << ".";
MS_LOG(DEBUG) << "Generator operator sanity check, prefetch size is " << build_prefetch_size_ << ".";
if (build_prefetch_size_ > 0) {
build_op_connector_size_ = (build_prefetch_size_ + build_buffer_size_ - 1) / build_buffer_size_;
}
@ -221,15 +221,15 @@ Status GeneratorOp::operator()() {
}
if (eoe) {
// Push out EOE upon StopIteration exception from generator
MS_LOG(INFO) << "Generator operator sends out EOE.";
MS_LOG(DEBUG) << "Generator operator sends out EOE.";
std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) {
// If last repeat or not repeated, push out EOF and exit master loop
MS_LOG(INFO) << "Generator operator sends out EOF.";
MS_LOG(DEBUG) << "Generator operator sends out EOF.";
std::unique_ptr<DataBuffer> eof_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer)));
MS_LOG(INFO) << "Generator operator main execution loop complete.";
MS_LOG(DEBUG) << "Generator operator main execution loop complete.";
eof = true;
} else {
// Waiting for repeatOp to start new epoch

@ -333,8 +333,8 @@ Status ImageFolderOp::PrescanWorkerEntry(int32_t worker_id) {
if (extensions_.empty() || extensions_.find(file.Extension()) != extensions_.end()) {
(void)imgs.insert(file.toString().substr(dirname_offset_));
} else {
MS_LOG(INFO) << "Image folder operator unsupported file found: " << file.toString()
<< ", extension: " << file.Extension() << ".";
MS_LOG(WARNING) << "Image folder operator unsupported file found: " << file.toString()
<< ", extension: " << file.Extension() << ".";
}
}
FolderImagesPair p = std::make_shared<std::pair<std::string, std::queue<ImageLabelPair>>>();

@ -387,7 +387,7 @@ Status MnistOp::WalkAllFiles() {
}
}
} else {
MS_LOG(INFO) << "Mnist operator unable to open directory " << dir.toString() << ".";
MS_LOG(WARNING) << "Mnist operator unable to open directory " << dir.toString() << ".";
}
std::sort(image_names_.begin(), image_names_.end());

@ -278,11 +278,11 @@ Status StorageOp::init() {
if (this->num_rows() % rows_per_buffer_ != 0) {
buffers_needed++;
}
MS_LOG(INFO) << "Master: Initializing StorageOp. Dataset files dir: " << dataset_files_dir_ << " Dataset type: "
<< static_cast<std::underlying_type<DatasetType>::type>(store_client_->schema()->dataset_type())
<< " Dataset schema file: " << schema_file_ << " Number of rows: " << num_rows_
<< " Rows per buffer: " << rows_per_buffer_ << " Num buffers (computed): " << buffers_needed
<< " Number of workers: " << num_workers_ << ".";
MS_LOG(DEBUG) << "Master: Initializing StorageOp. Dataset files dir: " << dataset_files_dir_ << " Dataset type: "
<< static_cast<std::underlying_type<DatasetType>::type>(store_client_->schema()->dataset_type())
<< " Dataset schema file: " << schema_file_ << " Number of rows: " << num_rows_
<< " Rows per buffer: " << rows_per_buffer_ << " Num buffers (computed): " << buffers_needed
<< " Number of workers: " << num_workers_ << ".";
// Next, create each buffer in a loop.
int32_t buff_id = 0;
@ -344,7 +344,7 @@ void StorageOp::Print(std::ostream &out, bool show_all) const {
// Private helper method. This one posts a control indicator for each worker thread to consume
// from the action queue. When the worker pops this msg, it will shut itself down gracefully.
Status StorageOp::PostEndOfData() {
MS_LOG(INFO) << "Master: Processed all of the buffers. Send end-of-data message to workers.";
MS_LOG(DEBUG) << "Master: Processed all of the buffers. Send end-of-data message to workers.";
// For each worker we add the message so that they can all get the memo
for (int32_t i = 0; i < num_workers_; ++i) {
@ -462,14 +462,14 @@ Status StorageOp::operator()() {
// Reduce the shared_ptr ref count of this buffer by removing it from the mDataBuffers
// table first before we push the buffer to output connector.
data_buffers_[buffer_id].reset();
MS_LOG(INFO) << "StorageOp master: Consumed buffer " << buffer_id << " from internal worker connector.";
MS_LOG(DEBUG) << "StorageOp master: Consumed buffer " << buffer_id << " from internal worker connector.";
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(fetched_buffer)));
MS_LOG(INFO) << "StorageOp master: pushed buffer " << buffer_id << " to output connector.";
MS_LOG(DEBUG) << "StorageOp master: pushed buffer " << buffer_id << " to output connector.";
// Now, check our loop exit conditions and perform appropriate end of data handling if
// we've reached the end of our scan.
if (buffers_fetched_ == num_buffers_to_fetch) {
MS_LOG(INFO) << "StorageOp master: Reached end of data.";
MS_LOG(DEBUG) << "StorageOp master: Reached end of data.";
// If we are not inside of a Repeat path in the tree, or we are in a repeat path but
// this was our last repeat, then we do a full quit here with eof control message.
@ -479,17 +479,17 @@ Status StorageOp::operator()() {
RETURN_IF_NOT_OK(this->PostEndOfData());
std::unique_ptr<DataBuffer> eoeBuffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoeBuffer)));
MS_LOG(INFO) << "StorageOp master: Flow end-of-data eof message.";
MS_LOG(DEBUG) << "StorageOp master: Flow end-of-data eof message.";
std::unique_ptr<DataBuffer> eofBuffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eofBuffer)));
MS_LOG(INFO) << "StorageOp master: Main execution loop complete.";
MS_LOG(DEBUG) << "StorageOp master: Main execution loop complete.";
done = true; // while loop exit
} else {
// We are in a repeat path and it's not the last repeat.
// Flow an end-of-epoch control message up the pipeline.
// RepeatOp above us somewhere in the tree will re-init us with the data to fetch again
// once it gets the end-of-epoch message.
MS_LOG(INFO) << "StorageOp master: Flow end-of-epoch eoe message.";
MS_LOG(DEBUG) << "StorageOp master: Flow end-of-epoch eoe message.";
std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOE);
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer)));
@ -513,7 +513,7 @@ Status StorageOp::operator()() {
// The entry point code for when workers are launched.
Status StorageOp::WorkerEntry(int32_t worker_id) {
int32_t next_action_id = 0;
MS_LOG(INFO) << "Worker: StorageOp worker entry point.";
MS_LOG(DEBUG) << "Worker: StorageOp worker entry point.";
// Handshake with TaskManager to synchronize the creation
TaskManager::FindMe()->Post();
@ -524,18 +524,18 @@ Status StorageOp::WorkerEntry(int32_t worker_id) {
// Drive a load of this buffer and get a pointer to the buffer after it's loaded in
std::unique_ptr<DataBuffer> dB;
RETURN_IF_NOT_OK(this->GetBuffer(next_action_id, &dB));
MS_LOG(INFO) << "Worker: Loaded buffer " << next_action_id << ".";
MS_LOG(DEBUG) << "Worker: Loaded buffer " << next_action_id << ".";
// Add the buffer to the internal queue for master to consume from later.
// This could end up blocking if the queue is full in which case it waits here
// until the master can drain a buffer off the queue.
RETURN_IF_NOT_OK(worker_connector_->Add(worker_id, std::move(dB)));
MS_LOG(INFO) << "Worker: Pushed buffer " << next_action_id << " to internal worker connector.";
MS_LOG(DEBUG) << "Worker: Pushed buffer " << next_action_id << " to internal worker connector.";
// Get the next action id and loop
RETURN_IF_NOT_OK(action_queue_[worker_id]->PopFront(&next_action_id));
}
MS_LOG(INFO) << "Worker: Received end-of-data message. Worker complete.";
MS_LOG(DEBUG) << "Worker: Received end-of-data message. Worker complete.";
return Status::OK();
}
@ -576,12 +576,12 @@ Status StorageOp::LoadParallelConfig() {
RETURN_STATUS_UNEXPECTED("Invalid deviceNum");
}
if (device_id_ > MAX_INTEGER_INT32 || device_id_ >= device_num_) {
MS_LOG(INFO) << "In parallel config file " << data_distribution_file_ << ", wrong deviceID provided.";
MS_LOG(DEBUG) << "In parallel config file " << data_distribution_file_ << ", wrong deviceID provided.";
RETURN_STATUS_UNEXPECTED("Invalid deviceId");
}
shard_config_ = js.value("shardConfig", "");
if (shard_config_ != "ALL" && shard_config_ != "UNIQUE" && shard_config_ != "RANDOM") {
MS_LOG(INFO) << "In parallel config file " << data_distribution_file_ << " wrong mShardConfig provided.";
MS_LOG(DEBUG) << "In parallel config file " << data_distribution_file_ << " wrong mShardConfig provided.";
RETURN_STATUS_UNEXPECTED("Invalid shardConfig");
}
std::string shuffle_str = js.value("shuffle", "");
@ -590,8 +590,8 @@ Status StorageOp::LoadParallelConfig() {
} else if (shuffle_str == "OFF") {
shuffle_config_ = false;
} else {
MS_LOG(INFO) << "In parallel config file " << data_distribution_file_
<< ", shuffle config is wrong: it's not ON or OFF";
MS_LOG(DEBUG) << "In parallel config file " << data_distribution_file_
<< ", shuffle config is wrong: it's not ON or OFF";
RETURN_STATUS_UNEXPECTED("Invalid shuffle option");
}
seed_ = js.value("seed", 0);

@ -112,7 +112,7 @@ Status TFBuffer::ParseSingleExample(dataengine::Example *ptr) {
cur_reader_.open(cur_f_info_.fileName);
// Seek to the offset
(void)cur_reader_.seekg(static_cast<std::streamsize>(cur_f_info_.startOffset));
MS_LOG(INFO) << "got new file " << cur_f_info_.fileName << ".";
MS_LOG(DEBUG) << "got new file " << cur_f_info_.fileName << ".";
}
// one record in tf_file looks like:

@ -49,7 +49,7 @@ Status TFClient::Init() {
const std::string kExtensionTF = ".tfrecord";
bool schema_init = false;
if (!storage_op_->dataset_files_dir().empty()) {
MS_LOG(INFO) << "Reading dataset using datasetPath.";
MS_LOG(DEBUG) << "Reading dataset using datasetPath.";
Path data_set_directory(storage_op_->dataset_files_dir());
auto dirIt = Path::DirIterator::OpenDirectory(&data_set_directory);
if (dirIt) {
@ -73,7 +73,7 @@ Status TFClient::Init() {
RETURN_STATUS_UNEXPECTED("Unable to open directory " + data_set_directory.toString());
}
} else {
MS_LOG(INFO) << "Reading dataset using dataset files list.";
MS_LOG(DEBUG) << "Reading dataset using dataset files list.";
for (auto filename : storage_op_->dataset_file_list()) {
const std::vector<uint64_t> recs_lengths = ParseTfFileLines(filename);
v_total_file_rows_.emplace_back(std::pair<std::string, std::vector<uint64_t>>(filename, std::move(recs_lengths)));
@ -327,7 +327,7 @@ Status TFClient::ParseTfFileSchema(const std::string &filename) {
reader.close();
dataengine::Example tf_file;
if (!tf_file.ParseFromString(serialized_example)) {
std::string err_msg = "parse tf_file failed";
std::string err_msg = "parse tf_file failed, file name is " + filename;
RETURN_STATUS_UNEXPECTED(err_msg);
}
const dataengine::Features &example_features = tf_file.features();

@ -356,7 +356,7 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) {
int64_t start_offset = io_block->GetStartOffset();
int64_t end_offset = io_block->GetEndOffset();
RETURN_IF_NOT_OK(LoadFile(filename, start_offset, end_offset, worker_id));
MS_LOG(INFO) << "TFReader operator worker " << worker_id << " loaded file " << filename << ".";
MS_LOG(DEBUG) << "TFReader operator worker " << worker_id << " loaded file " << filename << ".";
}
} else {
std::unique_ptr<DataBuffer> eoe_buffer = std::make_unique<DataBuffer>(1, DataBuffer::kDeBFlagEOE);

@ -111,7 +111,7 @@ Status ZipOp::operator()() {
// 5 handle eof
// propagate eof here.
MS_LOG(INFO) << "Zip operator got EOF, propagating.";
MS_LOG(DEBUG) << "Zip operator got EOF, propagating.";
RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique<DataBuffer>(0, DataBuffer::kDeBFlagEOF))));
return Status::OK();
}
@ -188,12 +188,12 @@ Status ZipOp::getNextTensorRow(TensorRow *const new_zip_row) {
if (new_row.empty()) {
// If we did not get a row from any of the children, then it's the end of an epoch and we can move
// to drain state.
MS_LOG(INFO) << "Zip operator child iterator produced empty row.";
MS_LOG(DEBUG) << "Zip operator child iterator produced empty row.";
draining_ = true;
new_zip_row->clear();
// If we picked up an eof here, then we are completely done.
if ((child_iterators_[i])->eof_handled()) {
MS_LOG(INFO) << "Zip operator iterator got EOF.";
MS_LOG(DEBUG) << "Zip operator iterator got EOF.";
eof_ = true;
}
return Status::OK();

@ -120,7 +120,7 @@ Status ExecutionTree::Launch() {
}
std::ostringstream ss;
ss << *this;
MS_LOG(INFO) << "Printing the tree before launch tasks:\n" << ss.str();
MS_LOG(DEBUG) << "Printing the tree before launch tasks:\n" << ss.str();
for (auto itr = this->begin(); itr != this->end(); ++itr) {
// An inlined operator is one that has an output connector size of 0, and it does not
// require a thread to execute. Instead, the work of this operator is executed inlined

@ -24,9 +24,9 @@ const bool DecodeOp::kDefRgbFormat = true;
DecodeOp::DecodeOp(bool is_rgb_format) : is_rgb_format_(is_rgb_format) {
if (is_rgb_format_) { // RGB colour mode
MS_LOG(INFO) << "Decode colour mode is RGB.";
MS_LOG(DEBUG) << "Decode colour mode is RGB.";
} else {
MS_LOG(INFO) << "Decode colour mode is BGR.";
MS_LOG(DEBUG) << "Decode colour mode is BGR.";
}
}

@ -38,7 +38,7 @@ Status Arena::Init() {
RETURN_IF_NOT_OK(DeMalloc(size_in_MB_ * 1048576L, &ptr_, false));
// Divide the memory into blocks. Ignore the last partial block.
uint64_t num_blks = size_in_bytes_ / ARENA_BLK_SZ;
MS_LOG(INFO) << "Size of memory pool is " << num_blks << ", number of blocks of size is " << ARENA_BLK_SZ << ".";
MS_LOG(DEBUG) << "Size of memory pool is " << num_blks << ", number of blocks of size is " << ARENA_BLK_SZ << ".";
tr_.Insert(0, num_blks);
return Status::OK();
}

@ -542,7 +542,7 @@ std::unique_ptr<V> BPlusTree<K, V, A, C, T>::DoUpdate(const key_type &key, std::
leaf->rw_lock_.Unlock();
return old;
} else {
MS_LOG(INFO) << "Key not found. rc = " << static_cast<int>(rc) << ".";
MS_LOG(DEBUG) << "Key not found. rc = " << static_cast<int>(rc) << ".";
return nullptr;
}
} else {

@ -299,7 +299,7 @@ typename BPlusTree<K, V, A, C, T>::ConstIterator BPlusTree<K, V, A, C, T>::Searc
// on the leaf. The unlock will be handled by the iterator when it goes out of scope.
return ConstIterator(leaf, slot, true);
} else {
MS_LOG(INFO) << "Key not found. rc = " << static_cast<int>(rc) << ".";
MS_LOG(DEBUG) << "Key not found. rc = " << static_cast<int>(rc) << ".";
return cend();
}
} else {
@ -321,7 +321,7 @@ typename BPlusTree<K, V, A, C, T>::Iterator BPlusTree<K, V, A, C, T>::Search(con
// on the leaf. The unlock will be handled by the iterator when it goes out of scope.
return Iterator(leaf, slot, true);
} else {
MS_LOG(INFO) << "Key not found. rc = " << static_cast<int>(rc) << ".";
MS_LOG(DEBUG) << "Key not found. rc = " << static_cast<int>(rc) << ".";
return end();
}
} else {

Some files were not shown because too many files have changed in this diff.
