// forked from pytorch/QNNPACK — benchmark/softargmax.cc
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <algorithm>
#include <cmath>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack.h>
#include <benchmark/benchmark.h>
// Benchmarks the QNNPACK quantized (Q8) SoftArgMax operator for a given
// batch size N (state.range(0)) and channel count C (state.range(1)).
// Reports items/s and bytes/s (one uint8 read + one uint8 write per item).
static void softargmax_q8(benchmark::State& state) {
  const size_t batchSize = static_cast<size_t>(state.range(0));
  const size_t channels = static_cast<size_t>(state.range(1));

  std::random_device randomDevice;
  auto rng = std::mt19937(randomDevice());
  // Note: uniform_int_distribution<uint8_t> is undefined behavior per the
  // standard (IntType must be at least short-width); draw 32-bit values in
  // [0, 255] instead and let std::generate narrow them into the buffer.
  auto u8rng = std::bind(
      std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

  std::vector<uint8_t> input(batchSize * channels);
  std::vector<uint8_t> output(batchSize * channels);
  std::generate(input.begin(), input.end(), std::ref(u8rng));
  // Poison the output buffer so stale zeros can't masquerade as results.
  std::fill(output.begin(), output.end(), 0xA5);

  qnnp_status status = qnnp_initialize();
  if (status != qnnp_status_success) {
    state.SkipWithError("failed to initialize QNNPACK");
    return; // SkipWithError does not abort the benchmark; bail out explicitly.
  }

  qnnp_operator_t softArgMaxOperator = nullptr;
  status = qnnp_create_softargmax_nc_q8(
      channels, 1.0f /* input scale */,
      0 /* output zero point */, 1.0f / 256.0f /* output scale */,
      0 /* flags */, &softArgMaxOperator);
  if (status != qnnp_status_success || softArgMaxOperator == nullptr) {
    state.SkipWithError("failed to create SoftArgMax operator");
    return; // Don't call setup/run on a null operator.
  }

  status = qnnp_setup_softargmax_nc_q8(
      softArgMaxOperator,
      batchSize,
      input.data(), channels /* input:stride */,
      output.data(), channels /* output:stride */);
  if (status != qnnp_status_success) {
    state.SkipWithError("failed to setup SoftArgMax operator");
    qnnp_delete_operator(softArgMaxOperator); // Don't leak the operator.
    return;
  }

  for (auto _ : state) {
    status = qnnp_run_operator(softArgMaxOperator, nullptr /* thread pool */);
    if (status != qnnp_status_success) {
      state.SkipWithError("failed to run SoftArgMax operator");
      break; // No point timing further iterations after a failure.
    }
  }

  const size_t itemsPerIteration = batchSize * channels;
  state.SetItemsProcessed(int64_t(state.iterations()) * int64_t(itemsPerIteration));

  // Each item is read once (input) and written once (output): 2 bytes/item.
  const size_t bytesPerIteration = 2 * itemsPerIteration * sizeof(uint8_t);
  state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytesPerIteration));

  status = qnnp_delete_operator(softArgMaxOperator);
  if (status != qnnp_status_success) {
    state.SkipWithError("failed to delete SoftArgMax operator");
  }
}
// Registers the characteristic (N, C) argument pairs: batch size 1 with
// classifier output widths from common datasets — CIFAR-10 (10),
// CIFAR-100 (100), ImageNet-1K (1000), ImageNet-1K+1 (1001),
// and ImageNet-22K (21841).
static void CharacteristicArguments(benchmark::internal::Benchmark* b)
{
  b->ArgNames({"N", "C"});
  for (const int64_t channels : {10, 100, 1000, 1001, 21841}) {
    b->Args({1, channels});
  }
}
// Register the benchmark with the characteristic (N, C) argument sets.
BENCHMARK(softargmax_q8)->Apply(CharacteristicArguments);

// Emit a main() unless this translation unit is linked into a combined
// benchmark binary that supplies its own entry point.
#ifndef QNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif