Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 669b503

Browse files
committed
add perf
1 parent 847d53f, commit 669b503

File tree

1 file changed

+57
-0
lines changed

1 file changed

+57
-0
lines changed

‎modules/dnn/perf/perf_layer.cpp‎

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -739,6 +739,62 @@ PERF_TEST_P_(Layer_InstanceNorm, InstanceNorm)
739739
test_layer({N, C, H, W});
740740
}
741741

742+
structLayer_Attention :publicTestBaseWithParam<tuple<Backend, Target>> {
743+
voidtest_layer(const std::vector<int> x_shape,const std::vector<int> qkv_hidden_sizes,constint num_heads) {
744+
int backendId = get<0>(GetParam());
745+
int targetId = get<1>(GetParam());
746+
747+
auto qk_hidden_size = qkv_hidden_sizes[0];
748+
auto v_hidden_size = qkv_hidden_sizes[2];
749+
750+
auto input_hidden_size = x_shape[2];
751+
auto hidden_size = qk_hidden_size + qk_hidden_size + v_hidden_size;
752+
753+
Matx(x_shape, CV_32F);
754+
Matweight(std::vector<int>{input_hidden_size, hidden_size}, CV_32F);
755+
Matbias(std::vector<int>{hidden_size}, CV_32F);
756+
757+
randu(x,0.f,1.f);
758+
randu(weight,0.f,1.f);
759+
randu(bias,0.f,1.f);
760+
761+
LayerParams lp;
762+
lp.type ="Attention";
763+
lp.name ="testLayer";
764+
lp.set("num_heads", num_heads);
765+
lp.set("qkv_hidden_sizes",DictValue::arrayInt(qkv_hidden_sizes.data(), qkv_hidden_sizes.size()));
766+
767+
Net net;
768+
int id = net.addLayerToPrev(lp.name, lp.type, lp);
769+
net.connect(0,0, id,0);
770+
net.connect(0,1, id,1);
771+
net.connect(0,2, id,2);
772+
773+
{
774+
std::vector<std::string> input_names{"x","weight","bias"};
775+
net.setInputsNames(input_names);
776+
net.setInput(x, input_names[0]);
777+
net.setInput(weight, input_names[1]);
778+
net.setInput(bias, input_names[2]);
779+
780+
net.setPreferableBackend(backendId);
781+
net.setPreferableTarget(targetId);
782+
Mat out = net.forward();
783+
}
784+
785+
TEST_CYCLE()
786+
{
787+
Mat out = net.forward();
788+
}
789+
790+
SANITY_CHECK_NOTHING();
791+
}
792+
};
793+
794+
PERF_TEST_P_(Layer_Attention, VisionTransformer) {
795+
test_layer({1,197,768}, {768,768,768},12);
796+
}
797+
742798
INSTANTIATE_TEST_CASE_P(/**/, Layer_Slice, dnnBackendsAndTargets(false,false));
743799
INSTANTIATE_TEST_CASE_P(/**/, Layer_NaryEltwise, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
744800
#ifdef HAVE_CUDA
@@ -750,6 +806,7 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_LayerNorm, testing::Values(std::make_tuple(D
750806
INSTANTIATE_TEST_CASE_P(/**/, Layer_LayerNormExpanded, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
751807
INSTANTIATE_TEST_CASE_P(/**/, Layer_GatherElements, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
752808
INSTANTIATE_TEST_CASE_P(/**/, Layer_InstanceNorm, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
809+
INSTANTIATE_TEST_CASE_P(/**/, Layer_Attention, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
753810

754811

755812
typedef TestBaseWithParam<tuple<Vec4i,int,bool, tuple<Backend, Target> > > Layer_FullyConnected;

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp