diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp
index 3d79ec5b00..b66fec1e04 100644
--- a/modules/calib3d/include/opencv2/calib3d.hpp
+++ b/modules/calib3d/include/opencv2/calib3d.hpp
@@ -1411,7 +1411,7 @@ CV_EXPORTS_W bool checkChessboard(InputArray img, Size size);
 - @ref CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
 - @ref CALIB_CB_MARKER The detected pattern must have a marker (see description).
 This should be used if an accurate camera calibration is required.
-@param meta Optional output arrray of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
+@param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
 Each entry stands for one corner of the pattern and can have one of the following values:
 - 0 = no meta data attached
 - 1 = left-top corner of a black cell
diff --git a/modules/core/include/opencv2/core/quaternion.hpp b/modules/core/include/opencv2/core/quaternion.hpp
index e39065020c..d8882f6a54 100644
--- a/modules/core/include/opencv2/core/quaternion.hpp
+++ b/modules/core/include/opencv2/core/quaternion.hpp
@@ -57,7 +57,7 @@ class QuatEnum
 public:
     /** @brief Enum of Euler angles type.
      *
-     * Without considering the possibility of using two different convertions for the definition of the rotation axes ,
+     * Without considering the possibility of using two different conversions for the definition of the rotation axes ,
      * there exists twelve possible sequences of rotation axes, divided into two groups:
      * - Proper Euler angles (Z-X-Z, X-Y-X, Y-Z-Y, Z-Y-Z, X-Z-X, Y-X-Y)
      * - Tait–Bryan angles (X-Y-Z, Y-Z-X, Z-X-Y, X-Z-Y, Z-Y-X, Y-X-Z).
@@ -273,7 +273,7 @@ public:
      * where \f$ q_{X, \theta_1} \f$ is created from @ref createFromXRot, \f$ q_{Y, \theta_2} \f$ is created from @ref createFromYRot,
      * \f$ q_{Z, \theta_3} \f$ is created from @ref createFromZRot.
      * @param angles the Euler angles in a vector of length 3
-     * @param eulerAnglesType the convertion Euler angles type
+     * @param eulerAnglesType the conversion Euler angles type
      */
     static Quat<_Tp> createFromEulerAngles(const Vec<_Tp, 3> &angles, QuatEnum::EulerAnglesType eulerAnglesType);
@@ -1610,7 +1610,7 @@ public:
      * EXT_ZXZ| \f$ \theta_1 = \arctan2(m_{31},m_{32}) \\\theta_2 = \arccos(m_{33}) \\\theta_3 = \arctan2(-m_{13},m_{23})\f$| \f$ \theta_1=0\\ \theta_2=0\\ \theta_3=\arctan2(m_{21},m_{22}) \f$| \f$ \theta_1= 0\\ \theta_2=\pi\\ \theta_3=\arctan2(m_{21},m_{11}) \f$
      * EXT_ZYZ| \f$ \theta_1 = \arctan2(m_{32},-m_{31})\\\theta_2 = \arccos(m_{33}) \\\theta_3 = \arctan2(m_{23},m_{13}) \f$| \f$ \theta_1=0\\ \theta_2=0\\ \theta_3=\arctan2(m_{21},m_{11}) \f$| \f$ \theta_1= 0\\ \theta_2=\pi\\ \theta_3=\arctan2(m_{21},m_{11}) \f$
      *
-     * @param eulerAnglesType the convertion Euler angles type
+     * @param eulerAnglesType the conversion Euler angles type
      */
     Vec<_Tp, 3> toEulerAngles(QuatEnum::EulerAnglesType eulerAnglesType);
diff --git a/modules/dnn/src/layers/instance_norm_layer.cpp b/modules/dnn/src/layers/instance_norm_layer.cpp
index ae61f15656..d695ca12f4 100644
--- a/modules/dnn/src/layers/instance_norm_layer.cpp
+++ b/modules/dnn/src/layers/instance_norm_layer.cpp
@@ -221,7 +221,7 @@ public:
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE {
-        // onnx to openvino convertion: https://github.com/openvinotoolkit/openvino/blob/2023.1.0/src/frontends/onnx/frontend/src/op/instance_norm.cpp
+        // onnx to openvino conversion: https://github.com/openvinotoolkit/openvino/blob/2023.1.0/src/frontends/onnx/frontend/src/op/instance_norm.cpp
         auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         const auto &input_shape = ieInpNode.get_shape();
diff --git a/modules/dnn/src/layers/matmul_layer.cpp b/modules/dnn/src/layers/matmul_layer.cpp
index 448af27c18..4d232b7a3e 100644
--- a/modules/dnn/src/layers/matmul_layer.cpp
+++ b/modules/dnn/src/layers/matmul_layer.cpp
@@ -423,7 +423,7 @@ class MatMulLayerImpl CV_FINAL : public MatMulLayer {
         op->set_input_x1_by_name(*input_A_node, input_A_wrapper->name.c_str());
         op->update_input_desc_x1(*input_A_desc);
         // set inputs : x2
-        if (blobs.empty()) { // varaible input B
+        if (blobs.empty()) { // variable input B
             auto input_B_wrapper = inputs[1].dynamicCast<CannBackendWrapper>();
             auto input_B_desc = input_B_wrapper->getTensorDesc();
             auto input_B_node = nodes[1].dynamicCast<CannBackendNode>()->getOp();
diff --git a/modules/dnn/src/layers/nary_eltwise_layers.cpp b/modules/dnn/src/layers/nary_eltwise_layers.cpp
index 68c76906c6..1cb32e9170 100644
--- a/modules/dnn/src/layers/nary_eltwise_layers.cpp
+++ b/modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -1036,7 +1036,7 @@ public:
         else if (op == OPERATION::LESS_EQUAL)
             node = std::make_shared<ov::op::v1::LessEqual>(inp0, inp1);
         // Ideally we should do this but int32 internal blobs are converted to float32 data type in inference.
-        // TODO: Remove data type convertion when we have type inference.
+        // TODO: Remove data type conversion when we have type inference.
         else if (op == OPERATION::MOD) {
             auto inp0_i64 = std::make_shared<ov::op::v0::Convert>(inp0, ov::element::i64);
             auto inp1_i64 = std::make_shared<ov::op::v0::Convert>(inp1, ov::element::i64);
diff --git a/modules/gapi/samples/onevpl_infer_with_advanced_device_selection.cpp b/modules/gapi/samples/onevpl_infer_with_advanced_device_selection.cpp
index de1d233ae5..8aac8fbaab 100644
--- a/modules/gapi/samples/onevpl_infer_with_advanced_device_selection.cpp
+++ b/modules/gapi/samples/onevpl_infer_with_advanced_device_selection.cpp
@@ -434,7 +434,7 @@ int main(int argc, char *argv[]) {
     //
     // - you should pass such wrappers as constructor arguments for each component in pipeline:
     //   a) use extended constructor for `onevpl::GSource` for activating predefined device & context
-    //   b) use `cfgContextParams` method of `cv::gapi::ie::Params` to enable `PreprocesingEngine`
+    //   b) use `cfgContextParams` method of `cv::gapi::ie::Params` to enable `PreprocessingEngine`
     //      for predefined device & context
     //   c) use `InferenceEngine::ParamMap` to activate remote ctx in Inference Engine for given
     //      device & context
@@ -577,14 +577,14 @@ int main(int argc, char *argv[]) {
     }
 #endif // HAVE_INF_ENGINE
-    // turn on VPP PreprocesingEngine if available & requested
+    // turn on VPP PreprocessingEngine if available & requested
     if (flow_settings->vpl_preproc_enable) {
         if (is_gpu(preproc_device)) {
-            // activate VPP PreprocesingEngine on GPU
+            // activate VPP PreprocessingEngine on GPU
             face_net.cfgPreprocessingParams(gpu_accel_device.value(), gpu_accel_ctx.value());
         } else {
-            // activate VPP PreprocesingEngine on CPU
+            // activate VPP PreprocessingEngine on CPU
             face_net.cfgPreprocessingParams(cpu_accel_device, cpu_accel_ctx);
         }
diff --git a/modules/imgcodecs/src/grfmt_tiff.cpp b/modules/imgcodecs/src/grfmt_tiff.cpp
index 2061f14900..902e061e31 100644
--- a/modules/imgcodecs/src/grfmt_tiff.cpp
+++ b/modules/imgcodecs/src/grfmt_tiff.cpp
@@ -988,7 +988,7 @@ bool TiffDecoder::readData( Mat& img )
                 break;
             default:
-                CV_LOG_ONCE_ERROR(NULL, "OpenCV TIFF(line " << __LINE__ << "): Unsupported convertion :"
+                CV_LOG_ONCE_ERROR(NULL, "OpenCV TIFF(line " << __LINE__ << "): Unsupported conversion :"
                     << " bpp = " << bpp << " ncn = " << (int)ncn << " wanted_channels =" << wanted_channels );
                 break;
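For reference, a minimal usage sketch of the Euler-angle conversion API whose documentation the quaternion.hpp hunks touch; this snippet is not part of the patch, and it assumes OpenCV 4.5.2 or newer built with opencv2/core/quaternion.hpp available. The INT_/EXT_ prefixes of QuatEnum::EulerAnglesType select intrinsic or extrinsic rotation axes, and the same value must be used in both directions to round-trip the angles.

```cpp
// Illustrative sketch only (not from the patch): round-trip Euler angles through cv::Quat.
#include <opencv2/core/quaternion.hpp>
#include <iostream>

int main()
{
    // Intrinsic X-Y-Z (Tait-Bryan) angles in radians.
    cv::Vec3d angles(0.1, 0.2, 0.3);
    cv::Quatd q = cv::Quatd::createFromEulerAngles(angles, cv::QuatEnum::INT_XYZ);

    // Converting back with the same EulerAnglesType recovers the input angles.
    cv::Vec3d back = q.toEulerAngles(cv::QuatEnum::INT_XYZ);
    std::cout << back[0] << " " << back[1] << " " << back[2] << std::endl;
    return 0;
}
```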