commit a6fbf7ec28
@@ -0,0 +1,150 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/string/pretty_log.h"

namespace paddle {
namespace framework {

// This code can be shared with Executor.
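// InitializeVariable allocates the concrete holder that matches a variable's
// declared proto type (LoDTensor, SelectedRows, ...), so that operators can
// later call GetMutable<T>() on an already-typed Variable.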
static void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
  if (var_type == proto::VarType::LOD_TENSOR) {
    var->GetMutable<LoDTensor>();
  } else if (var_type == proto::VarType::SELECTED_ROWS) {
    var->GetMutable<SelectedRows>();
  } else if (var_type == proto::VarType::FEED_MINIBATCH) {
    var->GetMutable<FeedFetchList>();
  } else if (var_type == proto::VarType::FETCH_LIST) {
    var->GetMutable<FeedFetchList>();
  } else if (var_type == proto::VarType::STEP_SCOPES) {
    var->GetMutable<std::vector<framework::Scope>>();
  } else if (var_type == proto::VarType::LOD_RANK_TABLE) {
    var->GetMutable<LoDRankTable>();
  } else if (var_type == proto::VarType::LOD_TENSOR_ARRAY) {
    var->GetMutable<LoDTensorArray>();
  } else if (var_type == proto::VarType::PLACE_LIST) {
    var->GetMutable<platform::PlaceList>();
  } else if (var_type == proto::VarType::READER) {
    var->GetMutable<ReaderHolder>();
  } else if (var_type == proto::VarType::CHANNEL) {
    var->GetMutable<ChannelHolder>();
  } else if (var_type == proto::VarType::RAW) {
    // GetMutable will be called in operator.
  } else {
    PADDLE_THROW(
        "Variable type %d is not in "
        "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, STEP_SCOPES, "
        "LOD_RANK_TABLE, LOD_TENSOR_ARRAY, PLACE_LIST, READER, CHANNEL, RAW]",
        var_type);
  }
}

void NaiveExecutor::Prepare(Scope *parent_scope,
                            const ProgramDesc &program_desc, int block_id,
                            bool with_feed_fetch_ops) {
  if (!parent_scope) {
    scope_ = new framework::Scope;
  } else {
    scope_ = &parent_scope->NewScope();
  }
  CreateVariables(program_desc, scope_, block_id);
  CreateOps(program_desc, block_id, with_feed_fetch_ops);
}

void NaiveExecutor::Run() {
  for (auto &op : ops_) {
    VLOG(4) << "run " << op->Type();
    op->Run(*scope_, place_);
  }
}

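// CreateVariables splits variables between two scopes: persistable variables
// (e.g. model parameters) are created in the root/ancestor scope so they can
// outlive and be shared by child scopes, while temporary variables are
// created in the local scope passed in.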
void NaiveExecutor::CreateVariables(const ProgramDesc &desc, Scope *scope,
                                    int block_id) {
  PADDLE_ENFORCE(scope);
  auto &global_block = desc.Block(block_id);

  const Scope *ancestor_scope = scope;
  while (ancestor_scope->parent()) {
    ancestor_scope = ancestor_scope->parent();
  }

  if (ancestor_scope != scope) {
    for (auto &var : global_block.AllVars()) {
      if (var->Name() == framework::kEmptyVarName) {
        continue;
      }
      // Create persistable vars in the ancestor scope.
      if (var->Persistable()) {
        auto *ptr = const_cast<Scope *>(ancestor_scope)->Var(var->Name());
        InitializeVariable(ptr, var->GetType());
        VLOG(3) << "Create Variable " << var->Name()
                << " globally, whose pointer is " << ptr;
      } else {  // Create temporary variables in the local scope.
        auto *ptr = scope->Var(var->Name());
        InitializeVariable(ptr, var->GetType());
        VLOG(3) << "Create Variable " << var->Name()
                << " locally, whose pointer is " << ptr;
      }
    }
  } else {
    for (auto &var : global_block.AllVars()) {
      auto *ptr = scope->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
      VLOG(3) << "Create Variable " << var->Name() << ", whose pointer is "
              << ptr;
    }
  }
}

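// When with_feed_fetch_ops is false, feed/fetch operators are dropped from
// the program: inputs and outputs are then exchanged directly through
// FindTensor() (the zero-copy path) instead of feed/fetch variables.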
void NaiveExecutor::CreateOps(const ProgramDesc &desc, int block_id,
                              bool with_feed_fetch_ops) {
  for (const auto &op_desc : desc.Block(block_id).AllOps()) {
    if (!with_feed_fetch_ops &&
        (op_desc->Type() == "feed" || op_desc->Type() == "fetch")) {
      string::PrettyLogEndl(string::Style::detail(), "--- skip [%s], %s -> %s",
                            op_desc->Input("X")[0], op_desc->Type(),
                            op_desc->Output("Out")[0]);
      continue;
    }
    ops_.emplace_back(OpRegistry::CreateOp(*op_desc));
  }
}

LoDTensor *NaiveExecutor::FindTensor(const std::string &name) {
  PADDLE_ENFORCE(scope_, "Need to init scope first");
  auto *var = scope_->FindVar(name);
  PADDLE_ENFORCE(var, "No variable [%s] in the scope", name);
  auto *tensor = const_cast<LoDTensor *>(&var->Get<LoDTensor>());
  return tensor;
}

void NaiveExecutor::CleanFeedFetchOps() {
  std::vector<std::unique_ptr<OperatorBase>> ops;
  for (auto &op : ops_) {
    if (op->Type() != "feed" && op->Type() != "fetch") {
      ops.emplace_back(std::move(op));
    }
  }
  ops_.swap(ops);
}

}  // namespace framework
}  // namespace paddle
@@ -0,0 +1,63 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/device_context.h"

namespace paddle {
namespace framework {

/*
 * Simple, intuitive and effective. Only a single thread is supported;
 * currently designed for inference.
 */
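// A minimal usage sketch, mirroring naive_executor_test.cc in this commit
// (the tensor names and the `program`/`place` variables are placeholders):
//
//   NaiveExecutor exe(place);
//   exe.Prepare(nullptr, program, /*block_id=*/0,
//               /*with_feed_fetch_ops=*/false);
//   exe.FindTensor("x")->Resize({1, 4});              // write inputs directly
//   exe.FindTensor("x")->mutable_data<float>(place);
//   exe.Run();
//   auto* out = exe.FindTensor("out");                // read outputs directly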
class NaiveExecutor {
 public:
  explicit NaiveExecutor(const platform::Place& place) : place_(place) {}

  // Create the child scope and the variables.
  // @with_feed_fetch_ops: whether to work with the feed and fetch operators.
  void Prepare(Scope* parent_scope, const ProgramDesc& program_desc,
               int block_id, bool with_feed_fetch_ops);

  // Run all the operators.
  void Run();

  // Get a tensor to operate on directly, without the need for feed_ops.
  LoDTensor* FindTensor(const std::string& name);

  Scope* scope() { return scope_; }

  void CleanFeedFetchOps();

 protected:
  void CreateVariables(const ProgramDesc& desc, Scope* scope, int block_id);

  void CreateOps(const ProgramDesc& desc, int block_id,
                 bool with_feed_fetch_ops);

 private:
  const platform::Place place_;
  // Cache the created operators to avoid recreating them on every run.
  std::vector<std::unique_ptr<OperatorBase>> ops_;
  Scope* scope_{nullptr};
};

}  // namespace framework
}  // namespace paddle
@@ -0,0 +1,70 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/naive_executor.h"
#include <gtest/gtest.h>
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/program_desc.h"

namespace paddle {
namespace framework {

TEST(NaiveExecutor, Basic) {
  ProgramDesc program;
  auto* main_block = program.MutableBlock(0);
  auto* a = main_block->Var("a");  // input
  auto* b = main_block->Var("b");  // input
  auto* c = main_block->Var("c");  // output
  a->SetType(proto::VarType::LOD_TENSOR);
  b->SetType(proto::VarType::LOD_TENSOR);
  c->SetType(proto::VarType::LOD_TENSOR);

  auto* add = main_block->AppendOp();
  add->SetType("elementwise_add");
  add->SetInput("X", {"a"});
  add->SetInput("Y", {"b"});
  add->SetOutput("Out", {"c"});

  auto place = platform::CPUPlace();
  NaiveExecutor exe(place);
  exe.Prepare(nullptr, program, 0, false /*with feed fetch ops*/);
  auto* a_tensor = exe.FindTensor("a");
  auto* b_tensor = exe.FindTensor("b");
  auto* c_tensor = exe.FindTensor("c");

  a_tensor->Resize({1, 4});
  b_tensor->Resize({1, 4});
  c_tensor->Resize({1, 4});
  b_tensor->mutable_data<float>(place);
  a_tensor->mutable_data<float>(place);

  float a_arr[] = {0, 1, 2, 3};
  float b_arr[] = {0.0, .1, .2, .3};

  std::copy_n(a_arr, 4, a_tensor->mutable_data<float>(place));
  std::copy_n(b_arr, 4, b_tensor->mutable_data<float>(place));

  exe.Run();

  auto* c_data = c_tensor->mutable_data<float>(place);
  for (int i = 0; i < 4; i++) {
    EXPECT_NEAR(c_data[i], 1.1 * i, 1e-3);
  }
}

}  // namespace framework
}  // namespace paddle

USE_OP(elementwise_add);
File diff suppressed because it is too large.
@@ -0,0 +1,67 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

DEFINE_string(dirname, "", "Directory of the inference model to test.");

namespace paddle {
namespace inference {
using contrib::AnalysisConfig;

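// Zero-copy inference: with use_feed_fetch_ops disabled, inputs are written
// and outputs are read directly through ZeroCopyTensor handles obtained from
// the predictor, instead of going through feed/fetch operators.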
TEST(AnalysisPredictor, ZeroCopy) {
  AnalysisConfig config;
  config.model_dir = FLAGS_dirname + "/word2vec.inference.model";
  config.use_feed_fetch_ops = false;

  auto predictor =
      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
          config);

  auto w0 = predictor->GetInputTensor("firstw");
  auto w1 = predictor->GetInputTensor("secondw");
  auto w2 = predictor->GetInputTensor("thirdw");
  auto w3 = predictor->GetInputTensor("forthw");

  w0->Reshape({4, 1});
  w1->Reshape({4, 1});
  w2->Reshape({4, 1});
  w3->Reshape({4, 1});

  auto* w0_data = w0->mutable_data<int64_t>(PaddlePlace::kCPU);
  auto* w1_data = w1->mutable_data<int64_t>(PaddlePlace::kCPU);
  auto* w2_data = w2->mutable_data<int64_t>(PaddlePlace::kCPU);
  auto* w3_data = w3->mutable_data<int64_t>(PaddlePlace::kCPU);

  for (int i = 0; i < 4; i++) {
    w0_data[i] = i;
    w1_data[i] = i;
    w2_data[i] = i;
    w3_data[i] = i;
  }

  predictor->ZeroCopyRun();

  auto out = predictor->GetOutputTensor("fc_1.tmp_2");
  PaddlePlace place;
  int size = 0;
  auto* out_data = out->data<float>(&place, &size);
LOG(INFO) << "output size: " << size / sizeof(float);
|
||||
LOG(INFO) << "output_data: " << out_data;
|
||||
}
|
||||
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
@@ -0,0 +1,111 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {

void ZeroCopyTensor::Reshape(const std::vector<int> &shape) {
  PADDLE_ENFORCE(!name_.empty(),
                 "Need to SetName first, so that the corresponding tensor can "
                 "be retrieved.");
  PADDLE_ENFORCE(input_or_output_,
                 "Can't reshape the output tensor, it is readonly");
  PADDLE_ENFORCE(scope_);
  auto *scope = static_cast<framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE(var, "No tensor called [%s] in the runtime scope", name_);
  auto *tensor = var->GetMutable<framework::LoDTensor>();
  tensor->Resize(framework::make_ddim(shape));
}

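// mutable_data<T>() is the input-side half of the zero-copy contract: after
// Reshape(), the caller obtains a typed buffer on the requested place and
// writes the input values into it before calling ZeroCopyRun(). Note that the
// kGPU branch below allocates on the default device (CUDAPlace() is device 0).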
template <typename T>
T *ZeroCopyTensor::mutable_data(PaddlePlace place) {
  auto *tensor = static_cast<framework::LoDTensor *>(FindTensor());
  switch (static_cast<int>(place)) {
    case static_cast<int>(PaddlePlace::kCPU): {
      return tensor->mutable_data<T>(platform::CPUPlace());
    }
    case static_cast<int>(PaddlePlace::kGPU): {
      return tensor->mutable_data<T>(platform::CUDAPlace());
    }
    default:
      PADDLE_THROW("Unsupported place: %d", static_cast<int>(place));
      break;
  }
  return nullptr;
}

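// data<T>() is the output-side half: it returns the tensor's raw buffer
// together with the place it lives on and its size, where *size is the number
// of elements (tensor->numel()), not a byte count.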
template <typename T>
T *ZeroCopyTensor::data(PaddlePlace *place, int *size) {
  auto *tensor = static_cast<framework::LoDTensor *>(FindTensor());
  auto *res = tensor->data<T>();

  if (platform::is_cpu_place(tensor->place())) {
    *place = PaddlePlace::kCPU;
  } else if (platform::is_gpu_place(tensor->place())) {
    *place = PaddlePlace::kGPU;
  } else {
    *place = PaddlePlace::kUNK;
  }

  *size = tensor->numel();
  return res;
}

template float *ZeroCopyTensor::data<float>(PaddlePlace *place, int *size);
template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place, int *size);
template float *ZeroCopyTensor::mutable_data<float>(PaddlePlace place);
template int64_t *ZeroCopyTensor::mutable_data<int64_t>(PaddlePlace place);

void *ZeroCopyTensor::FindTensor() const {
  PADDLE_ENFORCE(!name_.empty(),
                 "Need to SetName first, so that the corresponding tensor can "
                 "be retrieved.");
  PADDLE_ENFORCE(scope_);
  auto *scope = static_cast<framework::Scope *>(scope_);
  auto *var = scope->FindVar(name_);
  PADDLE_ENFORCE(var, "No tensor called [%s] in the runtime scope", name_);
  auto *tensor = var->GetMutable<framework::LoDTensor>();
  return tensor;
}

std::vector<int64_t> ZeroCopyTensor::shape() {
  auto *tensor = static_cast<framework::LoDTensor *>(FindTensor());
  PADDLE_ENFORCE(tensor, "No tensor called [%s] found in the scope", name_);
  return framework::vectorize(tensor->dims());
}

void ZeroCopyTensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
  auto *tensor = static_cast<framework::LoDTensor *>(FindTensor());
  framework::LoD lod;
  for (auto &level : x) {
    lod.emplace_back(level);
  }
  tensor->set_lod(lod);
}

std::vector<std::vector<size_t>> ZeroCopyTensor::lod() const {
  std::vector<std::vector<size_t>> res;
  auto *tensor = static_cast<framework::LoDTensor *>(FindTensor());
  for (auto &level : tensor->lod()) {
    res.emplace_back(level);
  }
  return res;
}

}  // namespace paddle
@@ -0,0 +1,46 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/paddle_inference_api.h"

namespace paddle {

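// The definitions below are empty stubs of the ZeroCopyTensor API, presumably
// there to satisfy the linker in build configurations that do not include the
// full implementation above (an assumption; the build wiring is not shown in
// this diff).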
void ZeroCopyTensor::Reshape(const std::vector<int> &shape) {}

template <typename T>
T *ZeroCopyTensor::mutable_data(PaddlePlace place) {
  return nullptr;
}

template <typename T>
T *ZeroCopyTensor::data(PaddlePlace *place, int *size) {
  return nullptr;
}

template float *ZeroCopyTensor::data<float>(PaddlePlace *place, int *size);
template int64_t *ZeroCopyTensor::data<int64_t>(PaddlePlace *place, int *size);
template float *ZeroCopyTensor::mutable_data(PaddlePlace place);
template int64_t *ZeroCopyTensor::mutable_data(PaddlePlace place);

void *ZeroCopyTensor::FindTensor() const { return nullptr; }

std::vector<int64_t> ZeroCopyTensor::shape() { return {}; }

void ZeroCopyTensor::SetLoD(const std::vector<std::vector<size_t>> &x) {}

std::vector<std::vector<size_t>> ZeroCopyTensor::lod() const {
  return std::vector<std::vector<size_t>>();
}

}  // namespace paddle
Some files were not shown because too many files have changed in this diff.