Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions include/xnnpack.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,14 @@ extern "C" {
/// Allow IEEE FP16 inference in a Runtime.
///
/// Note: this flag hints XNNPACK to consider IEEE FP16 inference, but does not guarantee it.
/// @deprecated Will be removed soon; convert the subgraph tensors to FP16 instead.
#define XNN_FLAG_HINT_FP16_INFERENCE 0x00000002

/// Force IEEE FP16 inference in a Runtime, and fail if FP16 inference is not possible.
///
/// Note: this flag guarantees that XNNPACK will use IEEE FP16 inference, or fail to create the Runtime object.
/// Warning: on x86 systems FP16 computations will be emulated at a substantial performance cost.
/// @deprecated Will be removed soon; convert the subgraph tensors to FP16 instead.
#define XNN_FLAG_FORCE_FP16_INFERENCE 0x00000004

/// Enable timing of each operator's runtime.
Expand Down
12 changes: 12 additions & 0 deletions src/runtime.c
Original file line number Diff line number Diff line change
Expand Up @@ -621,6 +621,18 @@ static enum xnn_status create_runtime_impl(
}
}

// Both FP16 flags are deprecated (see xnnpack.h): warn callers that still pass
// them so they migrate to FP16 subgraphs before the flags are removed.
// Behavior is otherwise unchanged -- the flags still take effect below.
if (flags & XNN_FLAG_FORCE_FP16_INFERENCE) {
xnn_log_warning(
"XNN_FLAG_FORCE_FP16_INFERENCE is deprecated and will be removed soon: "
"convert your tensors to FP16 instead.");
}

if (flags & XNN_FLAG_HINT_FP16_INFERENCE) {
xnn_log_warning(
"XNN_FLAG_HINT_FP16_INFERENCE is deprecated and will be removed soon: "
"convert your tensors to FP16 instead.");
}

if (flags & XNN_FLAG_SLOW_CONSISTENT_ARITHMETIC) {
xnn_log_warning(
"XNN_FLAG_SLOW_CONSISTENT_ARITHMETIC is enabled: performance will be "
Expand Down
Loading