Set is_dynamic to false for layout data_padding only for FULLY_CONNECTED
Signed-off-by: yuan.xiong <yuan.xiong@intel.com>
yuanxion committed Feb 5, 2025
1 parent de6ec66 commit d9b845c
Showing 2 changed files with 12 additions and 8 deletions.
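In short: update_shapes() previously passed a blanket is_runtime = true into convert_data_tensor() during runtime shape updates, which cleared the dynamic-pad flag (pad.is_dynamic) for every kernel type. With this commit the flag is derived inside update_shapes() and is true only for FULLY_CONNECTED kernels, so all other kernel types keep the dynamic padding reported by their layouts. A minimal, self-contained sketch of the new gating follows; the KernelType enum and Pad struct here are simplified stand-ins for the real kernel-selector types, not the actual definitions:

    #include <cstdio>

    // Hypothetical stand-ins for the kernel-selector types, reduced to the
    // fields this commit touches.
    enum class KernelType { FULLY_CONNECTED, CONVOLUTION };

    struct Pad {
        bool dynamic_pad_dim = true;  // the layout reports dynamic padding on this dim
        bool is_dynamic = false;      // what convert_data_tensor() would record
    };

    // Mirrors the patched line in convert_data_tensor():
    //   elm.pad.is_dynamic = is_runtime_fc ? false : dynamic_pad_dims[tensor_index];
    void convert_pad(Pad& pad, bool is_runtime_fc) {
        pad.is_dynamic = is_runtime_fc ? false : pad.dynamic_pad_dim;
    }

    int main() {
        const KernelType kinds[] = {KernelType::FULLY_CONNECTED, KernelType::CONVOLUTION};
        for (KernelType kt : kinds) {
            // Mirrors the patched update_shapes(): only FULLY_CONNECTED clears the flag.
            bool is_runtime_fc = (kt == KernelType::FULLY_CONNECTED);
            Pad pad;
            convert_pad(pad, is_runtime_fc);
            std::printf("%s -> pad.is_dynamic = %s\n",
                        kt == KernelType::FULLY_CONNECTED ? "FULLY_CONNECTED" : "CONVOLUTION",
                        pad.is_dynamic ? "true" : "false");
        }
        return 0;
    }

Expected output: FULLY_CONNECTED -> pad.is_dynamic = false, while CONVOLUTION -> pad.is_dynamic = true.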
@@ -876,7 +876,7 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) {
     }
 }
 
-kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor view_offset, bool is_runtime) {
+kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor view_offset, bool is_runtime_fc) {
     const auto& pad = l.data_padding;
     const auto& vals_original = l.get_partial_shape();
 
@@ -910,7 +910,7 @@ kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor v
         elm.pitch = pitch;
         elm.pad.before = dynamic_pad_dims[tensor_index] ? 0 : lp;
         elm.pad.after = dynamic_pad_dims[tensor_index] ? 0 : up;
-        elm.pad.is_dynamic = is_runtime ? false : dynamic_pad_dims[tensor_index];
+        elm.pad.is_dynamic = is_runtime_fc ? false : dynamic_pad_dims[tensor_index];
         elm.is_dynamic = d.is_dynamic();
 
         pitch *= (reserved_in_mem_count + lp + up);
src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h: 16 changes (10 additions & 6 deletions)
@@ -101,7 +101,7 @@ kernel_selector::data_layout to_data_layout(format f);
 cldnn::format from_data_layout(kernel_selector::data_layout l);
 kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped);
 cldnn::format::type from_weights_layout(kernel_selector::weights_layout l);
-kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor view_offset = tensor {}, bool is_runtime = false);
+kernel_selector::data_tensor convert_data_tensor(const layout& l, const tensor view_offset = tensor {}, bool is_runtime_fc = false);
 kernel_selector::weights_tensor convert_weights_tensor(const layout& l, bool is_grouped = false);
 layout from_weights_tensor(const kernel_selector::weights_tensor& t);
 kernel_selector::activation_function get_kernel_selector_activation_param(activation_func activation_func);
@@ -286,22 +286,26 @@ inline std::shared_ptr<WeightsReorderParams> create_weights_reorder_params(const
     return std::make_shared<WeightsReorderParams>(from_weights_tensor(params.src), from_weights_tensor(params.dest), params.rotate);
 }
 
-inline void update_shapes(kernel_selector::Params& p, const kernel_impl_params& impl_param, bool is_runtime = true) {
+inline void update_shapes(kernel_selector::Params& p, const kernel_impl_params& impl_param) {
+    bool is_runtime_fc = false;
+    if (p.GetType() == kernel_selector::KernelType::FULLY_CONNECTED) {
+        is_runtime_fc = true;
+    }
     auto& bp = static_cast<kernel_selector::base_params&>(p);
     for (size_t i = 0; i < bp.inputs.size(); i++) {
-        bp.inputs[i] = convert_data_tensor(impl_param.input_layouts[i], tensor {}, is_runtime);
+        bp.inputs[i] = convert_data_tensor(impl_param.input_layouts[i], tensor {}, is_runtime_fc);
     }
     for (size_t i = 0; i < bp.outputs.size(); i++) {
-        bp.outputs[i] = convert_data_tensor(impl_param.output_layouts[i], tensor {}, is_runtime);
+        bp.outputs[i] = convert_data_tensor(impl_param.output_layouts[i], tensor {}, is_runtime_fc);
     }
 
     for (size_t i = 0; i < bp.fused_ops.size(); i++) {
         const auto& fused_prim = impl_param.fused_desc[i];
         auto& fd = bp.fused_ops[i];
-        fd.output_tensor = convert_data_tensor(fused_prim.output_layout, tensor {}, is_runtime);
+        fd.output_tensor = convert_data_tensor(fused_prim.output_layout, tensor {}, is_runtime_fc);
         fd.tensors.clear();
         for (size_t i = fd.dep_idx_start; i < fd.dep_idx_start + fd.dep_size; i++) {
-            fd.tensors.push_back(convert_data_tensor(impl_param.get_input_layout(i), tensor {}, is_runtime));
+            fd.tensors.push_back(convert_data_tensor(impl_param.get_input_layout(i), tensor {}, is_runtime_fc));
         }
     }
 }
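Call-site note: update_shapes() no longer takes the third is_runtime argument (which defaulted to true), so the behavior can no longer be overridden per call; the FULLY_CONNECTED check is derived from p.GetType() internally. A hedged usage sketch, assuming a kernel_selector::Params named params and a kernel_impl_params named impl_param already in scope:

    // Before this commit, update_shapes(params, impl_param) implicitly meant is_runtime = true
    // for every kernel type. After it, the same call clears dynamic padding only when
    // params.GetType() == kernel_selector::KernelType::FULLY_CONNECTED.
    update_shapes(params, impl_param);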
