sycl_interop_buffer.cpp
Annotated version: Getting started on both CPU and GPU with SYCL extensions API
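This C++ example demonstrates the SYCL interoperability API in oneDNN with buffer-based memory. The workflow: create an engine and stream for the requested device kind, allocate a oneDNN memory object backed by a SYCL buffer, fill it with a custom SYCL kernel submitted to the stream's queue, execute an eltwise ReLU primitive in place, and validate the result on the host through a host accessor.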
/*******************************************************************************
* Copyright 2019-2023 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
// [Prologue]
#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"
#include "oneapi/dnnl/dnnl_debug.h"
#include "oneapi/dnnl/dnnl_sycl.hpp"
#if __has_include(<sycl/sycl.hpp>)
#include <sycl/sycl.hpp>
#elif __has_include(<CL/sycl.hpp>)
#include <CL/sycl.hpp>
#else
#error "Unsupported compiler"
#endif
#include <cassert>
#include <iostream>
#include <numeric>
using namespace dnnl;
using namespace sycl;
// [Prologue]
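// Tag type used to name the custom SYCL kernel submitted below.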
class kernel_tag;
void sycl_interop_buffer_tutorial(engine::kind engine_kind) {
// [Initialize engine]
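    // Create an execution engine of the requested kind (CPU or GPU),
    // using device index 0.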
engine eng(engine_kind, 0);
// [Initialize engine]
// [Initialize stream]
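    // Create a stream on the engine; it wraps the SYCL queue that both
    // the custom kernel and the primitive below are submitted to.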
dnnl::stream strm(eng);
// [Initialize stream]
// [memory alloc]
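    // Describe a 2x3x4x5 f32 tensor in NCHW layout and allocate a oneDNN
    // memory object explicitly backed by a SYCL buffer
    // (sycl_interop::memory_kind::buffer).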
memory::dims tz_dims = {2, 3, 4, 5};
const size_t N = std::accumulate(tz_dims.begin(), tz_dims.end(), (size_t)1,
std::multiplies<size_t>());
memory::desc mem_d(
tz_dims, memory::data_type::f32, memory::format_tag::nchw);
memory mem = sycl_interop::make_memory(
mem_d, eng, sycl_interop::memory_kind::buffer);
// [memory alloc]
// [get sycl buf]
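    // Retrieve the underlying SYCL buffer from the oneDNN memory object.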
auto sycl_buf = sycl_interop::get_buffer<float>(mem);
// [get sycl buf]
// [sycl kernel exec]
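    // Get the SYCL queue from the stream and initialize the buffer with a
    // custom SYCL kernel: even indices are set to positive values, odd
    // indices to negative ones.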
queue q = sycl_interop::get_queue(strm);
q.submit([&](handler &cgh) {
auto a = sycl_buf.get_access<access::mode::write>(cgh);
cgh.parallel_for<kernel_tag>(range<1>(N), [=](id<1> i) {
int idx = (int)i[0];
a[idx] = (idx % 2) ? -idx : idx;
});
});
// [sycl kernel exec]
// [relu creation]
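    // Create a forward eltwise ReLU primitive descriptor (negative slope
    // alpha = 0.f, i.e. a standard ReLU) and the corresponding primitive.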
auto relu_pd = eltwise_forward::primitive_desc(eng, prop_kind::forward,
algorithm::eltwise_relu, mem_d, mem_d, 0.0f);
auto relu = eltwise_forward(relu_pd);
// [relu creation]
// [relu exec]
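    // Execute the ReLU in place (the same memory object serves as both
    // SRC and DST) and wait for the computation to finish.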
relu.execute(strm, {{DNNL_ARG_SRC, mem}, {DNNL_ARG_DST, mem}});
strm.wait();
// [relu exec]
// [Check the results]
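    // Read the buffer back through a host accessor: odd-indexed elements
    // were negative and must now be zero; even-indexed elements are
    // unchanged.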
auto host_acc = sycl_buf.get_host_access();
for (size_t i = 0; i < N; i++) {
        float exp_value = (i % 2) ? 0.0f : (float)i;
        if (host_acc[i] != exp_value)
            throw std::string(
                    "Unexpected output, found a negative value after the ReLU "
                    "execution.");
}
// [Check the results]
}
// [Main]
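// main() parses the engine kind from the command line, runs the tutorial,
// and reports oneDNN errors, example errors, and SYCL errors separately.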
int main(int argc, char **argv) {
int exit_code = 0;
engine::kind engine_kind = parse_engine_kind(argc, argv);
try {
sycl_interop_buffer_tutorial(engine_kind);
} catch (dnnl::error &e) {
std::cout << "oneDNN error caught: " << std::endl
<< "\tStatus: " << dnnl_status2str(e.status) << std::endl
<< "\tMessage: " << e.what() << std::endl;
exit_code = 1;
} catch (std::string &e) {
std::cout << "Error in the example: " << e << "." << std::endl;
exit_code = 2;
} catch (exception &e) {
std::cout << "Error in the example: " << e.what() << "." << std::endl;
exit_code = 3;
}
std::cout << "Example " << (exit_code ? "failed" : "passed") << " on "
<< engine_kind2str_upper(engine_kind) << "." << std::endl;
finalize();
return exit_code;
}
// [Main]