diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h
index 27e1845cc..28397e5cf 100644
--- a/Eigen/src/Core/ArrayBase.h
+++ b/Eigen/src/Core/ArrayBase.h
@@ -23,7 +23,7 @@ template class MatrixWrapper;
  *
  * An array is similar to a dense vector or matrix. While matrices are mathematical
  * objects with well defined linear algebra operators, an array is just a collection
- * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence,
+ * of scalar values arranged in a one or two dimensional fashion. As the main consequence,
  * all operations applied to an array are performed coefficient wise. Furthermore,
  * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient
  * constructors allowing to easily write generic code working for both scalar values
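For context on the hunk above: "coefficient wise" is the key Array-vs-Matrix distinction in Eigen. A minimal sketch using only the public Eigen API (illustration, not code from this patch):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Array22d a;
  a << 1, 2,
       3, 4;
  Eigen::Matrix2d m = a.matrix();

  std::cout << a * a << "\n\n";        // coefficient-wise: 1 4 / 9 16
  std::cout << m * m << "\n\n";        // matrix product:   7 10 / 15 22
  std::cout << (a + 1).sin() << "\n";  // scalar broadcast, then cwise sin()
}
```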
diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h
index e02652fe5..46d8730cb 100644
--- a/Eigen/src/Core/DenseCoeffsBase.h
+++ b/Eigen/src/Core/DenseCoeffsBase.h
@@ -45,7 +45,7 @@ class DenseCoeffsBase : public EigenBase
   // - This is the return type of the coeff() method.
   // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
   //   to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
-  // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems
+  // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
   //   while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is
   //   not possible, since the underlying expressions might not offer a valid address the reference could be referring to.
   typedef typename internal::conditional::Flags&LvalueBit),
diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index ecff0b761..42e92c20b 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -429,8 +429,8 @@ struct generic_product_impl
   // 3 - it makes this fallback consistent with the heavy GEMM routine.
   // 4 - it fully by-passes huge stack allocation attempts when multiplying huge fixed-size matrices.
   //     (see https://stackoverflow.com/questions/54738495)
-  // For small fixed sizes matrices, howver, the gains are less obvious, it is sometimes x2 faster, but sometimes x3 slower,
-  // and the behavior depends also a lot on the compiler... This is why this re-writting strategy is currently
+  // For small fixed sizes matrices, however, the gains are less obvious, it is sometimes x2 faster, but sometimes x3 slower,
+  // and the behavior depends also a lot on the compiler... This is why this re-writing strategy is currently
   // enabled only when falling back from the main GEMM.
   template
   static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
index 0ded33daa..3e1d99cfa 100644
--- a/Eigen/src/Core/Ref.h
+++ b/Eigen/src/Core/Ref.h
@@ -300,7 +300,7 @@ template class Ref
                  typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0)
     {
       EIGEN_STATIC_ASSERT(bool(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
-      // Construction must pass since we will not create temprary storage in the non-const case.
+      // Construction must pass since we will not create temporary storage in the non-const case.
       const bool success = Base::construct(expr.derived());
       EIGEN_UNUSED_VARIABLE(success)
       eigen_assert(success);
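The comment fixed above belongs to the writable-`Ref` constructor, which must alias its argument's storage directly rather than fall back to a temporary copy. A small sketch of the user-visible consequence (standard `Ref` usage as documented, not code from this patch):

```cpp
#include <Eigen/Dense>
using namespace Eigen;

// A writable Ref never copies: it must bind directly to the argument's data.
void scale(Ref<MatrixXf> x, float s) { x *= s; }

int main() {
  MatrixXf m = MatrixXf::Random(4, 4);
  scale(m, 2.0f);                      // binds directly to m's storage
  scale(m.topLeftCorner(2, 2), 0.5f);  // blocks with inner stride 1 match too
  // scale(m.transpose(), 2.0f);       // would trigger the static assert above:
  //                                   // STORAGE_LAYOUT_DOES_NOT_MATCH
}
```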
diff --git a/Eigen/src/Core/arch/Default/Half.h b/Eigen/src/Core/arch/Default/Half.h
index 299946cb9..df82612a5 100644
--- a/Eigen/src/Core/arch/Default/Half.h
+++ b/Eigen/src/Core/arch/Default/Half.h
@@ -262,7 +262,7 @@ namespace half_impl {
 #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && \
      EIGEN_CUDA_ARCH >= 530) ||                                  \
     (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
-// Note: We deliberatly do *not* define this to 1 even if we have Arm's native
+// Note: We deliberately do *not* define this to 1 even if we have Arm's native
 // fp16 type since GPU halfs are rather different from native CPU halfs.
 // TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16
 #define EIGEN_HAS_NATIVE_FP16
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index a1b8b6895..3d915026d 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -622,7 +622,7 @@ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
 #ifdef EIGEN_VECTORIZE_SSE4_1
 template<> EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a)
 {
-  // Unfortunatly _mm_round_ps doesn't have a rounding mode to implement numext::round.
+  // Unfortunately _mm_round_ps doesn't have a rounding mode to implement numext::round.
   const Packet4f mask = pset1frombits(0x80000000u);
   const Packet4f prev0dot5 = pset1frombits(0x3EFFFFFFu);
   return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
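The fixed comment notes that `_mm_round_ps` lacks the round-half-away-from-zero mode `numext::round` requires, so the kernel adds the largest float strictly below 0.5, carrying the input's sign, and then truncates. A scalar model of the same trick, for illustration only:

```cpp
#include <cmath>
#include <cstdio>

// numext::round rounds half away from zero, but _MM_FROUND_TO_ZERO truncates.
// Adding the largest float strictly below 0.5 (bit pattern 0x3EFFFFFF), with
// the sign of the input, makes truncation land on the rounded value.
float round_half_away(float x) {
  const float prev0dot5 = 0.49999997f;  // 0x3EFFFFFF as a float
  return std::trunc(x + std::copysign(prev0dot5, x));
}

int main() {
  std::printf("%g %g %g\n",
              round_half_away(2.5f),         // 3
              round_half_away(-2.5f),        // -3
              round_half_away(2.4999998f));  // 2
}
```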
diff --git a/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h b/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h
index a45a5bcb0..2b96587f4 100644
--- a/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h
+++ b/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h
@@ -168,7 +168,7 @@ class PointerMapper {
   /**
    * Obtain the insertion point in the pointer map for
    * a pointer of the given size.
-   * \param requiredSize Size attemted to reclaim
+   * \param requiredSize Size attempted to reclaim
    */
   typename pointerMap_t::iterator get_insertion_point(size_t requiredSize) {
     typename pointerMap_t::iterator retVal;
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index 64def5efe..88e2e8a8a 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -358,7 +358,7 @@ struct functor_traits >
     PacketAccess = (!NumTraits::IsComplex && !NumTraits::IsInteger &&
                     packet_traits::HasExp && packet_traits::HasLog &&
                     packet_traits::HasRound && packet_traits::HasCmp &&
-                    // Temporarly disable packet access for half/bfloat16 until
+                    // Temporarily disable packet access for half/bfloat16 until
                     // accuracy is improved.
                     !is_same::value && !is_same::value
                    )
diff --git a/Eigen/src/Core/functors/UnaryFunctors.h b/Eigen/src/Core/functors/UnaryFunctors.h
index 1827c1b33..044f7dd4f 100644
--- a/Eigen/src/Core/functors/UnaryFunctors.h
+++ b/Eigen/src/Core/functors/UnaryFunctors.h
@@ -1037,7 +1037,7 @@ struct scalar_logistic_op {
  * Uses just a 9/10-degree rational interpolant which
  * interpolates 1/(1+exp(-x)) - 0.5 up to a couple of ulps in the range
  * [-9, 18]. Below -9 we use the more accurate approximation
- * 1/(1+exp(-x)) ~= exp(x), and above 18 the logistic function is 1 withing
+ * 1/(1+exp(-x)) ~= exp(x), and above 18 the logistic function is 1 within
  * one ulp. The shifted logistic is interpolated because it was easier to
  * make the fit converge.
 *
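For orientation, the comment above describes a piecewise evaluation scheme. A schematic sketch of the branch structure only; the kernel's fitted rational coefficients are not reproduced here, and the closed form stands in for the interpolant so the sketch stays runnable:

```cpp
#include <cmath>

// Schematic of the branch structure described in the fixed comment.
// interpolant(x) would be the fitted 9/10-degree rational approximation of
// 1/(1+exp(-x)) - 0.5; the closed form is substituted for clarity.
float logistic(float x) {
  if (x < -9.0f) return std::exp(x);  // 1/(1+exp(-x)) ~= exp(x) for x << 0
  if (x > 18.0f) return 1.0f;         // saturated to 1 within one ulp
  float interpolant = 1.0f / (1.0f + std::exp(-x)) - 0.5f;
  return 0.5f + interpolant;          // the *shifted* logistic is interpolated
}
```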
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index db915a6fa..89e999b0d 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -797,7 +797,7 @@ public:
   typedef typename conditional::type ResPacket;
   typedef typename conditional::type AccPacket;
 
-  // this actualy holds 8 packets!
+  // this actually holds 8 packets!
   typedef QuadPacket RhsPacketx4;
 
   EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
index e9d1947a3..b775dbe99 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -66,7 +66,7 @@ public:
 
 /* Optimized col-major matrix * vector product:
  * This algorithm processes the matrix per vertical panels,
- * which are then processed horizontaly per chunck of 8*PacketSize x 1 vertical segments.
+ * which are then processed horizontally per chunk of 8*PacketSize x 1 vertical segments.
  *
  * Mixing type logic: C += alpha * A * B
  *  | A  | B  |alpha| comments
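The fixed sentence describes the traversal order of the col-major kernel. A plain, unvectorized sketch of that ordering (illustrative only; the real kernel walks each panel in SIMD chunks of 8*PacketSize x 1 segments):

```cpp
// Unvectorized picture of a col-major mat*vec: one vertical panel per
// column of A, accumulating alpha * A(:,j) * b(j) into the result c.
void matvec_colmajor(const float* A, const float* b, float* c,
                     int rows, int cols, float alpha) {
  for (int j = 0; j < cols; ++j) {
    const float ab = alpha * b[j];
    for (int i = 0; i < rows; ++i)
      c[i] += ab * A[i + j * rows];  // column j is contiguous in memory
  }
}
```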
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index a33b6698e..a0620b383 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -649,7 +649,7 @@
 
 // The macros EIGEN_HAS_CXX?? defines a rough estimate of available c++ features
-// but in practice we should not rely on them but rather on the availabilty of
+// but in practice we should not rely on them but rather on the availability of
 // individual features as defined later.
 // This is why there is no EIGEN_HAS_CXX17.
 // FIXME: get rid of EIGEN_HAS_CXX14 and maybe even EIGEN_HAS_CXX11.
diff --git a/Eigen/src/Core/util/Serializer.h b/Eigen/src/Core/util/Serializer.h
index ca7775f0d..7ec5eedd5 100644
--- a/Eigen/src/Core/util/Serializer.h
+++ b/Eigen/src/Core/util/Serializer.h
@@ -21,7 +21,7 @@ namespace Eigen {
 /**
  * Serializes an object to a memory buffer.
  *
- * Useful for transfering data (e.g. back-and-forth to a device).
+ * Useful for transferring data (e.g. back-and-forth to a device).
 */
 template
 class Serializer;
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index 531fda1a5..b4f82492e 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -261,7 +261,7 @@ template class ComplexSchur
     friend struct internal::complex_schur_reduce_to_hessenberg::IsComplex>;
 };
 
-/** If m_matT(i+1,i) is neglegible in floating point arithmetic
+/** If m_matT(i+1,i) is negligible in floating point arithmetic
   * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
   * return true, else return false. */
 template
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index bbb667640..7ecaf21eb 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -54,7 +54,7 @@ template struct traits >
  * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
  *
- * The status of the computation can be retrived using the \a info() method. Unless \a info() returns \a Success, the results should be not
+ * The status of the computation can be retrieved using the \a info() method. Unless \a info() returns \a Success, the results should not be
 * considered well defined.
 *
 * If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to
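Since the corrected sentence documents `info()`, a minimal usage sketch (standard `BDCSVD` API; not code from this patch):

```cpp
#include <Eigen/SVD>
#include <iostream>
using namespace Eigen;

int main() {
  MatrixXd A = MatrixXd::Random(5, 3);
  BDCSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);  // thin U: 5x3, thin V: 3x3
  if (svd.info() != Success) {
    std::cout << "decomposition failed; results are not well defined\n";
    return 1;
  }
  VectorXd x = svd.solve(VectorXd::Ones(5));  // least-squares solve via thin U/V
  std::cout << x.transpose() << "\n";
}
```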
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index bf6976751..521331c14 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -469,7 +469,7 @@ template class unary_evaluator, IteratorBased>::InnerVectorInnerIterator
   : public EvalIterator
 {
-  // NOTE MSVC fails to compile if we don't explicitely "import" IsRowMajor from unary_evaluator
+  // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
   // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
   // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
   enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
diff --git a/Eigen/src/plugins/CommonCwiseBinaryOps.h b/Eigen/src/plugins/CommonCwiseBinaryOps.h
index 8b6730ede..2f503296f 100644
--- a/Eigen/src/plugins/CommonCwiseBinaryOps.h
+++ b/Eigen/src/plugins/CommonCwiseBinaryOps.h
@@ -12,7 +12,7 @@
 
 /** \returns an expression of the difference of \c *this and \a other
   *
-  * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-().
+  * \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
   *
   * \sa class CwiseBinaryOp, operator-=()
   */
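The fixed note distinguishes matrix-matrix subtraction from subtracting a scalar from every coefficient; a two-line sketch of both forms (plain Eigen API):

```cpp
#include <Eigen/Dense>
using namespace Eigen;

int main() {
  MatrixXd m = MatrixXd::Constant(2, 2, 3.0);
  MatrixXd a = m - MatrixXd::Identity(2, 2);  // matrix - matrix: the operator- above
  MatrixXd b = (m.array() - 1.0).matrix();    // scalar subtracted from every coefficient
}
```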
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index b2747144c..571190121 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -678,8 +678,8 @@ endmacro()
 # Split all tests listed in EIGEN_TESTS_LIST into num_splits many targets
 # named buildtestspartN with N = { 0, ..., num_splits-1}.
 #
-# The intention behind the existance of this macro is the size of Eigen's
-# testsuite. Together with the relativly big compile-times building all tests
+# The intention behind the existence of this macro is the size of Eigen's
+# testsuite. Together with the relatively big compile-times building all tests
 # can take a substantial amount of time depending on the available hardware.
 #
 # The last buildtestspartN target will build possible remaining tests.
diff --git a/cmake/FindComputeCpp.cmake b/cmake/FindComputeCpp.cmake
index 1c271f0fe..e20052277 100644
--- a/cmake/FindComputeCpp.cmake
+++ b/cmake/FindComputeCpp.cmake
@@ -382,7 +382,7 @@ endfunction(__build_ir)
 #######################
 #
 # Adds a SYCL compilation custom command associated with an existing
-# target and sets a dependancy on that new command.
+# target and sets a dependency on that new command.
 #
 # TARGET : Name of the target to add SYCL to.
 # SOURCES : Source files to be compiled for SYCL.
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index 0f545b086..9d4638895 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -104,7 +104,7 @@ run time. However, these assertions do cost time and can thus be turned off.
  - \b \c EIGEN_MAX_ALIGN_BYTES - Must be a power of two, or 0. Defines an upper bound on the memory boundary in bytes on which dynamically and statically allocated data may be aligned by %Eigen. If not defined, a default value is automatically computed based on architecture, compiler, and OS. This option is typically used to enforce binary compatibility between code/libraries compiled with different SIMD options. For instance, one may compile AVX code and enforce ABI compatibility with existing SSE code by defining \c EIGEN_MAX_ALIGN_BYTES=16. In the other way round, since by default AVX implies 32 bytes alignment for best performance, one can compile SSE code to be ABI compatible with AVX code by defining \c EIGEN_MAX_ALIGN_BYTES=32.
  - \b \c EIGEN_MAX_STATIC_ALIGN_BYTES - Same as \c EIGEN_MAX_ALIGN_BYTES but for statically allocated data only. By default, if only \c EIGEN_MAX_ALIGN_BYTES is defined, then \c EIGEN_MAX_STATIC_ALIGN_BYTES == \c EIGEN_MAX_ALIGN_BYTES, otherwise a default value is automatically computed based on architecture, compiler, and OS (can be smaller than the default value of EIGEN_MAX_ALIGN_BYTES on architectures that do not support stack alignment).
-   Let us emphasize that \c EIGEN_MAX_*_ALIGN_BYTES define only a diserable upper bound. In practice data is aligned to largest power-of-two common divisor of \c EIGEN_MAX_STATIC_ALIGN_BYTES and the size of the data, such that memory is not wasted.
+   Let us emphasize that \c EIGEN_MAX_*_ALIGN_BYTES define only a desirable upper bound. In practice data is aligned to the largest power-of-two common divisor of \c EIGEN_MAX_STATIC_ALIGN_BYTES and the size of the data, such that memory is not wasted.
  - \b \c EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP. See \ref TopicMultiThreading for details.
  - \b \c EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
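A concrete reading of the corrected sentence, assuming `EIGEN_MAX_STATIC_ALIGN_BYTES` is 16 (illustrative numbers only):

```cpp
#include <Eigen/Core>

// sizeof(Vector4f) = 16 -> gcd(16, 16) = 16 -> aligned to 16 bytes.
// sizeof(Vector3f) = 12 -> the largest power of two dividing both 16 and 12
// is 4 -> aligned to 4 bytes only, so no memory is wasted on padding.
static_assert(sizeof(Eigen::Vector4f) == 16, "4 floats, alignment-friendly");
static_assert(sizeof(Eigen::Vector3f) == 12, "3 floats, no padding inserted");
```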
diff --git a/doc/TopicLinearAlgebraDecompositions.dox b/doc/TopicLinearAlgebraDecompositions.dox
index 402b3769e..8598ce65b 100644
--- a/doc/TopicLinearAlgebraDecompositions.dox
+++ b/doc/TopicLinearAlgebraDecompositions.dox
@@ -272,7 +272,7 @@ To get an overview of the true relative speed of the different decompositions, c
   Blocking
   Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.
   Implicit Multi Threading (MT)
-  Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.
+  Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.
   Explicit Multi Threading (MT)
   Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.
   Meta-unroller
diff --git a/doc/TutorialSlicingIndexing.dox b/doc/TutorialSlicingIndexing.dox
index 4d10dd081..57cc4017b 100644
--- a/doc/TutorialSlicingIndexing.dox
+++ b/doc/TutorialSlicingIndexing.dox
@@ -112,7 +112,7 @@ Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v.
 
-As seen in the last exemple, referencing the last n elements (or rows/columns) is a bit cumbersome to write.
+As seen in the last example, referencing the last n elements (or rows/columns) is a bit cumbersome to write.
 This becomes even more tricky and error prone with a non-default increment.
 Here comes \link Eigen::placeholders::lastN(SizeType) Eigen::placeholders::lastN(size) \endlink,
 and \link Eigen::placeholders::lastN(SizeType,IncrType) Eigen::placeholders::lastN(size,incr) \endlink:
 
@@ -222,7 +222,7 @@
 i = ind[i];
 \endcode
 
 This means you can easily build your own fancy sequence generator and pass it to `operator()`.
-Here is an exemple enlarging a given matrix while padding the additional first rows and columns through repetition:
+Here is an example enlarging a given matrix while padding the additional first rows and columns through repetition:
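A short sketch of the two `lastN` forms named just above (Eigen 3.4 slicing API):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXi v = Eigen::VectorXi::LinSpaced(10, 0, 9);
  std::cout << v(Eigen::placeholders::lastN(3)).transpose() << "\n";     // 7 8 9
  std::cout << v(Eigen::placeholders::lastN(3, 2)).transpose() << "\n";  // 5 7 9
}
```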
diff --git a/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp b/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
index bbb821e02..cc0c50ef0 100644
--- a/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
+++ b/doc/snippets/SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
@@ -3,7 +3,7 @@ MatrixXd A = X + X.transpose();
 cout << "Here is a random symmetric matrix, A:" << endl << A << endl;
 X = MatrixXd::Random(5,5);
 MatrixXd B = X * X.transpose();
-cout << "and a random postive-definite matrix, B:" << endl << B << endl << endl;
+cout << "and a random positive-definite matrix, B:" << endl << B << endl << endl;
 
 GeneralizedSelfAdjointEigenSolver es(A,B);
 cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl;
diff --git a/test/gpu_test_helper.h b/test/gpu_test_helper.h
index bbbe97d8c..f796eeba5 100644
--- a/test/gpu_test_helper.h
+++ b/test/gpu_test_helper.h
@@ -306,7 +306,7 @@ auto run_on_cpu(Kernel kernel, Args&&... args) -> decltype(kernel(args...)){
  * The kernel must be able to be passed directly as an input to a global
  * function (i.e. empty or POD). Its inputs must be "Serializable" so we
  * can transfer them to the device, and the output must be a Serializable value
- * type so it can be transfered back from the device.
+ * type so it can be transferred back from the device.
  *
  * \param kernel kernel to run.
  * \param args ... input arguments, must be "Serializable".
@@ -424,7 +424,7 @@ void print_gpu_device_info()
  * The kernel must be able to be passed directly as an input to a global
  * function (i.e. empty or POD). Its inputs must be "Serializable" so we
  * can transfer them to the device, and the output must be a Serializable value
- * type so it can be transfered back from the device.
+ * type so it can be transferred back from the device.
  *
  * \param kernel kernel to run.
  * \param args ... input arguments, must be "Serializable".
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
index 3f3000f48..9f744ce4a 100755
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h
@@ -522,10 +522,10 @@ class TensorContractionKernel {
  * \param rhs_scratch_extract : determines the RHS tile memory. It is either private or local memory based on the
  * selected contraction_type.
  *
- * \param lhs_extract_index: determins the position of each thread on a local memory for lhs input. When private
+ * \param lhs_extract_index: determines the position of each thread on a local memory for lhs input. When private
  * memory is used this is set to zero as this is not applicable in case of private memory.
  *
- * \param rhs_extract_index: determins the position of each thread on a local memory for rhs input. When private
+ * \param rhs_extract_index: determines the position of each thread on a local memory for rhs input. When private
  * memory is used this is set to zero as this is not applicable in case of private memory.
  *
  * \param lhs_scratch_compute : determines the location to load for computation for lhs_local memory. This is the
@@ -1236,7 +1236,7 @@ struct GeneralVectorTensor {
  *
  * \param out_res: determines the output tensor containing the contraction result
  *
- * \param rng: determins the total input data size
+ * \param rng: determines the total input data size
 */
 template
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index 61f8e1c0b..2452e1848 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -98,7 +98,7 @@ struct TensorEvaluator::value;
@@ -700,7 +700,7 @@ struct TensorEvaluator::value;
     static const bool kIsRhs = is_rhs && std::is_same::value;
-    static_assert(kIsLhs || kIsRhs, "Unkown block type");
+    static_assert(kIsLhs || kIsRhs, "Unknown block type");
 
     using Blocks = ThreadLocalBlocks;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index e2387d42f..9cd8c6de1 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -246,7 +246,7 @@ class QueueInterface {
   }
 
   /// The memcpyHostToDevice is used to copy the data from host to device
-  /// The destination pointer could be deleted before the copy happend which is
+  /// The destination pointer could be deleted before the copy happened which is
   /// why a callback function is needed. By default if none is provided, the
   /// function is blocking.
   EIGEN_STRONG_INLINE void memcpyHostToDevice(
@@ -274,7 +274,7 @@ class QueueInterface {
   }
 
   /// The memcpyDeviceToHost is used to copy the data from device to host.
-  /// The source pointer could be deleted before the copy happend which is
+  /// The source pointer could be deleted before the copy happened which is
   /// why a callback function is needed. By default if none is provided, the
   /// function is blocking.
   EIGEN_STRONG_INLINE void memcpyDeviceToHost(
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index ec6fa376e..9fce0f045 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -31,7 +31,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) {
 }
 
 // The StorageMemory class is a container of the device specific pointer
-// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression
+// used for referring to a Pointer on TensorEvaluator class. While the TensorExpression
 // is a device-agnostic type and need MakePointer class for type conversion,
 // the TensorEvaluator class can be specialized for a device, hence it is possible
 // to construct different types of temproray storage memory in TensorEvaluator
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
index 27379c15a..c5a83d259 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
@@ -142,7 +142,7 @@ template class UniformRandomGenerator {
     // but for SYCL ((CLOCK * 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread and each thread adds
     // the (global_thread_id* 6364136223846793005ULL) for itself only once, in order to complete the construction
     // similar to CUDA Therefore, the thread Id injection is not available at this stage.
-    //However when the operator() is called the thread ID will be avilable. So inside the opeator,
+    //However when the operator() is called the thread ID will be available. So inside the operator,
     // we add the thrreadID, BlockId,... (which is equivalent of i)
     //to the seed and construct the unique m_state per thead similar to cuda.
     m_exec_once =false;
@@ -252,7 +252,7 @@ template class NormalRandomGenerator {
     //the same for all the thread. As unlike CUDA, the thread.ID, BlockID, etc is not a global function.
     // and only available on the Operator() function (which is called on the GPU).
     // Therefore, the thread Id injection is not available at this stage. However when the operator()
-    //is called the thread ID will be avilable. So inside the opeator,
+    //is called the thread ID will be available. So inside the operator,
     // we add the thrreadID, BlockId,... (which is equivalent of i)
     //to the seed and construct the unique m_state per thead similar to cuda.
     m_exec_once =false;
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
index 23cf4f280..59e192ef5 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -87,7 +87,7 @@ class EventCount {
       CheckState(state, true);
       uint64_t newstate;
       if ((state & kSignalMask) != 0) {
-        // Consume the signal and return immidiately.
+        // Consume the signal and return immediately.
         newstate = state - kWaiterInc - kSignalInc;
       } else {
         // Remove this thread from pre-wait counter and add to the waiter stack.
@@ -114,7 +114,7 @@ class EventCount {
       CheckState(state, true);
       uint64_t newstate = state - kWaiterInc;
       // We don't know if the thread was also notified or not,
-      // so we should not consume a signal unconditionaly.
+      // so we should not consume a signal unconditionally.
       // Only if number of waiters is equal to number of signals,
       // we know that the thread was notified and we must take away the signal.
       if (((state & kWaiterMask) >> kWaiterShift) ==
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index ae7a0048c..47f8c8438 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -165,7 +165,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
       p = z + gamma*p;
 
     ++iter;
-    // one dimensionnal optimization
+    // one dimensional optimization
     q = A * p;
     lambda = rho / q.dot(p);
     for (Index i = 0; i < C.rows(); ++i)
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 4909560a8..63d7cb80e 100755
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -20,7 +20,7 @@ namespace Eigen
 namespace internal
 {
-    /** \internal Low-level Induced Dimension Reduction algoritm
+    /** \internal Low-level Induced Dimension Reduction algorithm
         \param A The matrix A
         \param b The right hand side vector b
         \param x On input and initial solution, on output the computed solution.
@@ -372,7 +372,7 @@ namespace Eigen
     /** \internal */
     /** Loops over the number of columns of b and does the following:
-        1. sets the tolerence and maxIterations
+        1. sets the tolerance and maxIterations
         2. Calls the function that has the core solver routine
     */
     template
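Since the corrected lines document the solver's tolerance/maxIterations step, a minimal usage sketch, assuming the usual `IterativeSolverBase` interface and the default preconditioner:

```cpp
#include <unsupported/Eigen/IterativeSolvers>
#include <Eigen/SparseCore>
using namespace Eigen;

int main() {
  SparseMatrix<double> A(100, 100);
  A.setIdentity();                    // stand-in system; substitute a real matrix
  VectorXd b = VectorXd::Random(100);

  IDRS<SparseMatrix<double>> solver;  // default preconditioner assumed
  solver.setTolerance(1e-10);         // step 1 of the loop documented above
  solver.setMaxIterations(200);
  solver.compute(A);
  VectorXd x = solver.solve(b);       // step 2: the core solver routine
  return solver.info() == Success ? 0 : 1;
}
```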