//
// Copyright 2005-2007 Adobe Systems Incorporated
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
#ifndef BOOST_GIL_ALGORITHM_HPP
#define BOOST_GIL_ALGORITHM_HPP

#include <boost/gil/bit_aligned_pixel_iterator.hpp>
#include <boost/gil/color_base_algorithm.hpp>
#include <boost/gil/concepts.hpp>
#include <boost/gil/image_view.hpp>
#include <boost/gil/image_view_factory.hpp>
#include <boost/gil/detail/mp11.hpp>
#include <boost/gil/detail/type_traits.hpp>

#include <boost/assert.hpp>
#include <boost/config.hpp>

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <memory>
#include <type_traits>
#include <typeinfo>

namespace boost { namespace gil {

// forward declarations
template <typename ChannelPtr, typename ColorSpace>
struct planar_pixel_iterator;
template <typename Iterator>
class memory_based_step_iterator;
template <typename StepIterator>
class memory_based_2d_locator;

// a tag denoting incompatible arguments
struct error_t {};

/// \defgroup ImageViewSTLAlgorithms STL-like Algorithms
/// \ingroup ImageViewAlgorithm
/// \brief Image view-equivalents of STL algorithms
///
/// Image views provide 1D iteration of their pixels via \p begin() and \p end() methods,
/// which makes it possible to use STL algorithms with them. However, using nested loops
/// over X and Y is in many cases more efficient. The algorithms in this section resemble
/// STL algorithms, but they abstract away the nested loops and take views (as opposed to ranges) as input.
///
/// Most algorithms check whether the image views are 1D-traversable. A 1D-traversable image view has no gaps
/// at the end of the rows. In other words, if an x_iterator of that view is advanced past the last pixel in a row
/// it will move to the first pixel of the next row. When image views are 1D-traversable, the algorithms use
/// a single loop and run more efficiently. If one or more of the input views are not 1D-traversable, the algorithms
/// fall back to an X-loop nested inside a Y-loop.
///
/// The algorithms typically delegate the work to their corresponding STL algorithms. For example, \p copy_pixels calls
/// \p std::copy either for each row, or, when the images are 1D-traversable, once for all pixels.
///
/// In addition, overloads are sometimes provided for the STL algorithms. For example, std::copy for planar iterators
/// is overloaded to perform \p std::copy for each of the planes. \p std::copy over bitwise-copyable pixels results in
/// std::copy over unsigned char, which STL typically implements via \p memmove.
///
/// As a result \p copy_pixels may result in a single call to \p memmove for interleaved 1D-traversable views,
/// or one per plane of planar 1D-traversable views, or one per row of interleaved non-1D-traversable images, etc.

/// \defgroup STLOptimizations Performance overloads of STL algorithms
/// \ingroup ImageViewAlgorithm
/// \brief overloads of STL algorithms allowing more efficient implementation when used with GIL constructs

/// \brief A generic binary operation on views
/// \ingroup ImageViewSTLAlgorithms
///
/// Use this class as a convenience superclass when defining an operation for any image views.
/// Many operations have different behavior when the two views are compatible. This class checks
/// for compatibility and invokes apply_compatible(V1,V2) or apply_incompatible(V1,V2) of the subclass.
/// You must provide an apply_compatible(V1,V2) method in your subclass, but apply_incompatible(V1,V2)
/// is not required and the default throws std::bad_cast.
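///
/// A minimal usage sketch (illustrative only, not part of the library): a hypothetical
/// subclass that only handles compatible views and relies on the default apply_incompatible(),
/// applied to two views v1 and v2:
/// \code
/// struct pixels_are_equal : boost::gil::binary_operation_obj<pixels_are_equal, bool>
/// {
///     template <typename V1, typename V2>
///     bool apply_compatible(V1 const& v1, V2 const& v2) const
///     {
///         return boost::gil::equal_pixels(v1, v2);
///     }
/// };
/// // pixels_are_equal()(v1, v2) dispatches to apply_compatible() when the views are
/// // compatible, and throws std::bad_cast otherwise.
/// \endcode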
template <typename Derived, typename Result = void>
struct binary_operation_obj
{
    using result_type = Result;

    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type operator()(const std::pair<const V1*, const V2*>& p) const
    {
        return apply(*p.first, *p.second, typename views_are_compatible<V1, V2>::type());
    }

    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type operator()(const V1& v1, const V2& v2) const
    {
        return apply(v1, v2, typename views_are_compatible<V1, V2>::type());
    }

    result_type operator()(const error_t&) const { throw std::bad_cast(); }

private:
    // dispatch from apply overload to a function with distinct name
    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type apply(V1 const& v1, V2 const& v2, std::false_type) const
    {
        return ((const Derived*)this)->apply_incompatible(v1, v2);
    }

    // dispatch from apply overload to a function with distinct name
    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type apply(V1 const& v1, V2 const& v2, std::true_type) const
    {
        return ((const Derived*)this)->apply_compatible(v1, v2);
    }

    // function with distinct name - it can be overloaded by subclasses
    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type apply_incompatible(V1 const& /*v1*/, V2 const& /*v2*/) const
    {
        throw std::bad_cast();
    }
};

}} // namespace boost::gil

//////////////////////////////////////////////////////////////////////////////////////
// std::copy and gil::copy_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsCopyPixels copy_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::copy for image views

namespace std {

/// \ingroup STLOptimizations
/// \brief Copy when both src and dst are interleaved and of the same type can be just memmove
template <typename T, typename CS>
BOOST_FORCEINLINE
auto copy(
    boost::gil::pixel<T, CS>* first,
    boost::gil::pixel<T, CS>* last,
    boost::gil::pixel<T, CS>* dst)
    -> boost::gil::pixel<T, CS>*
{
    auto p = std::copy((unsigned char*)first, (unsigned char*)last, (unsigned char*)dst);
    return reinterpret_cast<boost::gil::pixel<T, CS>*>(p);
}

/// \ingroup STLOptimizations
/// \brief Copy when both src and dst are interleaved and of the same type can be just memmove
template <typename T, typename CS>
BOOST_FORCEINLINE
boost::gil::pixel<T, CS>* copy(
    const boost::gil::pixel<T, CS>* first,
    const boost::gil::pixel<T, CS>* last,
    boost::gil::pixel<T, CS>* dst)
{
    return (boost::gil::pixel<T, CS>*)std::copy((unsigned char*)first, (unsigned char*)last, (unsigned char*)dst);
}
} // namespace std

namespace boost { namespace gil {
namespace detail {

template <typename I, typename O>
struct copy_fn
{
    BOOST_FORCEINLINE I operator()(I first, I last, O dst) const { return std::copy(first, last, dst); }
};

} // namespace detail
}} // namespace boost::gil

namespace std {

/// \ingroup STLOptimizations
/// \brief Copy when both src and dst are planar pointers is copy for each channel
template <typename CS, typename IC1, typename IC2>
BOOST_FORCEINLINE
boost::gil::planar_pixel_iterator<IC2, CS> copy(
    boost::gil::planar_pixel_iterator<IC1, CS> first,
    boost::gil::planar_pixel_iterator<IC1, CS> last,
    boost::gil::planar_pixel_iterator<IC2, CS> dst)
{
    boost::gil::gil_function_requires
    <
        boost::gil::ChannelsCompatibleConcept
        <
            typename std::iterator_traits<IC1>::value_type,
            typename std::iterator_traits<IC2>::value_type
        >
    >();
    static_for_each(first, last, dst, boost::gil::detail::copy_fn<IC1, IC2>());
    return dst + (last - first);
}
} // namespace std
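// Illustrative note (not part of the library): because of the std::copy overloads above,
// copying a contiguous run of interleaved pixels through std::copy degenerates into a
// byte-wise copy, which standard libraries typically implement via memmove. For example:
//
//     boost::gil::rgb8_pixel_t src[16], dst[16];
//     std::copy(src, src + 16, dst); // resolves to the unsigned char copy above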
namespace boost { namespace gil {
namespace detail {

/// Does a copy-n. If the inputs contain image iterators, performs a copy at each row using the row iterators
/// \ingroup CopyPixels
template <typename I, typename O>
struct copier_n
{
    BOOST_FORCEINLINE void operator()(I src, typename std::iterator_traits<I>::difference_type n, O dst) const
    {
        std::copy(src, src + n, dst);
    }
};

/// Source range is delimited by image iterators
template <typename IL, typename O> // IL Models ConstPixelLocatorConcept, O Models PixelIteratorConcept
struct copier_n<iterator_from_2d<IL>, O>
{
    using diff_t = typename std::iterator_traits<iterator_from_2d<IL>>::difference_type;
    BOOST_FORCEINLINE void operator()(iterator_from_2d<IL> src, diff_t n, O dst) const
    {
        gil_function_requires<PixelLocatorConcept<IL>>();
        gil_function_requires<MutablePixelIteratorConcept<O>>();
        while (n > 0)
        {
            diff_t l = src.width() - src.x_pos();
            diff_t numToCopy = (n < l ? n : l);
            std::copy(src.x(), src.x() + numToCopy, dst);
            dst += numToCopy;
            src += numToCopy;
            n -= numToCopy;
        }
    }
};

/// Destination range is delimited by image iterators
template <typename I, typename OL> // I Models ConstPixelIteratorConcept, OL Models PixelLocatorConcept
struct copier_n<I, iterator_from_2d<OL>>
{
    using diff_t = typename std::iterator_traits<I>::difference_type;
    BOOST_FORCEINLINE void operator()(I src, diff_t n, iterator_from_2d<OL> dst) const
    {
        gil_function_requires<PixelIteratorConcept<I>>();
        gil_function_requires<MutablePixelLocatorConcept<OL>>();
        while (n > 0)
        {
            diff_t l = dst.width() - dst.x_pos();
            diff_t numToCopy = (n < l ? n : l);
            std::copy(src, src + numToCopy, dst.x());
            dst += numToCopy;
            src += numToCopy;
            n -= numToCopy;
        }
    }
};

/// Both source and destination ranges are delimited by image iterators
template <typename IL, typename OL>
struct copier_n<iterator_from_2d<IL>, iterator_from_2d<OL>>
{
    using diff_t = typename iterator_from_2d<IL>::difference_type;
    BOOST_FORCEINLINE void operator()(iterator_from_2d<IL> src, diff_t n, iterator_from_2d<OL> dst) const
    {
        gil_function_requires<PixelLocatorConcept<IL>>();
        gil_function_requires<MutablePixelLocatorConcept<OL>>();
        if (src.x_pos() != dst.x_pos() || src.width() != dst.width())
        {
            // rows are not aligned - fall back to a pixel-by-pixel copy
            while (n-- > 0)
            {
                *dst++ = *src++;
            }
        }
        while (n > 0)
        {
            diff_t l = dst.width() - dst.x_pos();
            diff_t numToCopy = (n < l ? n : l);
            std::copy(src.x(), src.x() + numToCopy, dst.x());
            dst += numToCopy;
            src += numToCopy;
            n -= numToCopy;
        }
    }
};

template <typename SrcIterator, typename DstIterator>
BOOST_FORCEINLINE DstIterator copy_with_2d_iterators(SrcIterator first, SrcIterator last, DstIterator dst)
{
    using src_x_iterator = typename SrcIterator::x_iterator;
    using dst_x_iterator = typename DstIterator::x_iterator;

    typename SrcIterator::difference_type n = last - first;

    if (first.is_1d_traversable())
    {
        if (dst.is_1d_traversable())
            copier_n<src_x_iterator, dst_x_iterator>()(first.x(), n, dst.x());
        else
            copier_n<src_x_iterator, DstIterator>()(first.x(), n, dst);
    }
    else
    {
        if (dst.is_1d_traversable())
            copier_n<SrcIterator, dst_x_iterator>()(first, n, dst.x());
        else
            copier_n<SrcIterator, DstIterator>()(first, n, dst);
    }
    return dst + n;
}

} // namespace detail
}} // namespace boost::gil

namespace std {

/// \ingroup STLOptimizations
/// \brief std::copy(I1,I1,I2) with I1 and I2 being an iterator_from_2d
template <typename Loc1, typename Loc2>
BOOST_FORCEINLINE
boost::gil::iterator_from_2d<Loc2> copy1(
    boost::gil::iterator_from_2d<Loc1> first,
    boost::gil::iterator_from_2d<Loc1> last,
    boost::gil::iterator_from_2d<Loc2> dst)
{
    return boost::gil::detail::copy_with_2d_iterators(first, last, dst);
}

} // namespace std

namespace boost { namespace gil {

/// \ingroup ImageViewSTLAlgorithmsCopyPixels
/// \brief std::copy for image views
template <typename View1, typename View2>
BOOST_FORCEINLINE
void copy_pixels(const View1& src, const View2& dst)
{
    BOOST_ASSERT(src.dimensions() == dst.dimensions());
    detail::copy_with_2d_iterators(src.begin(), src.end(), dst.begin());
}

//////////////////////////////////////////////////////////////////////////////////////
// copy_and_convert_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsCopyAndConvertPixels copy_and_convert_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief copies src view into dst view, color converting if necessary.
///
/// Versions taking static and runtime views are provided. Versions taking a user-defined color converter are provided.

namespace detail {

template <typename CC>
class copy_and_convert_pixels_fn : public binary_operation_obj<copy_and_convert_pixels_fn<CC>>
{
private:
    CC _cc;
public:
    using result_type = typename binary_operation_obj<copy_and_convert_pixels_fn<CC>>::result_type;

    copy_and_convert_pixels_fn() {}
    copy_and_convert_pixels_fn(CC cc_in) : _cc(cc_in) {}

    // when the two color spaces are incompatible, a color conversion is performed
    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type apply_incompatible(const V1& src, const V2& dst) const
    {
        copy_pixels(color_converted_view<typename V2::value_type>(src, _cc), dst);
    }

    // If the two color spaces are compatible, copy_and_convert is just copy
    template <typename V1, typename V2> BOOST_FORCEINLINE
    result_type apply_compatible(const V1& src, const V2& dst) const
    {
        copy_pixels(src, dst);
    }
};

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsCopyAndConvertPixels
template <typename V1, typename V2, typename CC>
BOOST_FORCEINLINE
void copy_and_convert_pixels(const V1& src, const V2& dst, CC cc)
{
    detail::copy_and_convert_pixels_fn<CC> ccp(cc);
    ccp(src, dst);
}

struct default_color_converter;

/// \ingroup ImageViewSTLAlgorithmsCopyAndConvertPixels
template <typename View1, typename View2>
BOOST_FORCEINLINE
void copy_and_convert_pixels(const View1& src, const View2& dst)
{
    detail::copy_and_convert_pixels_fn<default_color_converter> ccp;
    ccp(src, dst);
}

}} // namespace boost::gil
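// Illustrative usage sketch for copy_pixels / copy_and_convert_pixels above (not part of
// the library); rgb8_image_t, gray8_image_t, view() and const_view() come from the GIL
// image and typedef headers:
//
//     boost::gil::rgb8_image_t src(320, 240);
//     boost::gil::rgb8_image_t dst(320, 240);
//     boost::gil::copy_pixels(boost::gil::const_view(src), boost::gil::view(dst));
//
//     boost::gil::gray8_image_t gray(320, 240);
//     boost::gil::copy_and_convert_pixels(boost::gil::const_view(src), boost::gil::view(gray));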
//////////////////////////////////////////////////////////////////////////////////////
// std::fill and gil::fill_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsFillPixels fill_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::fill for image views

namespace std {

/// \ingroup STLOptimizations
/// \brief std::fill(I,I,V) with I being an iterator_from_2d
///
/// Invoked when one calls std::fill(I,I,V) with I being an iterator_from_2d (which is
/// a 1D iterator over the pixels in an image). For contiguous images (i.e. images that have
/// no alignment gap at the end of each row) it is more efficient to use the underlying
/// pixel iterator that does not check for the end of rows. For non-contiguous images fill
/// resolves to a fill of each row using the underlying pixel iterator, which is still faster.
template <typename IL, typename V>
void fill(boost::gil::iterator_from_2d<IL> first, boost::gil::iterator_from_2d<IL> last, const V& val)
{
    boost::gil::gil_function_requires<boost::gil::MutablePixelLocatorConcept<IL>>();
    if (first.is_1d_traversable())
    {
        std::fill(first.x(), last.x(), val);
    }
    else
    {
        // fill row by row
        std::ptrdiff_t n = last - first;
        while (n > 0)
        {
            std::ptrdiff_t numToDo = std::min(n, (std::ptrdiff_t)(first.width() - first.x_pos()));
            std::fill_n(first.x(), numToDo, val);
            first += numToDo;
            n -= numToDo;
        }
    }
}
} // namespace std

namespace boost { namespace gil {
namespace detail {

/// struct to do std::fill
struct std_fill_t
{
    template <typename It, typename P>
    void operator()(It first, It last, const P& p_in) { std::fill(first, last, p_in); }
};

/// std::fill for planar iterators
template <typename It, typename P>
BOOST_FORCEINLINE
void fill_aux(It first, It last, P const& p, std::true_type)
{
    static_for_each(first, last, p, std_fill_t());
}

/// std::fill for interleaved iterators
template <typename It, typename P>
BOOST_FORCEINLINE
void fill_aux(It first, It last, P const& p, std::false_type)
{
    std::fill(first, last, p);
}

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsFillPixels
/// \brief std::fill for image views
template <typename View, typename Value>
BOOST_FORCEINLINE
void fill_pixels(View const& view, Value const& value)
{
    if (view.is_1d_traversable())
    {
        detail::fill_aux(view.begin().x(), view.end().x(), value, is_planar<View>());
    }
    else
    {
        for (std::ptrdiff_t y = 0; y < view.height(); ++y)
            detail::fill_aux(view.row_begin(y), view.row_end(y), value, is_planar<View>());
    }
}

//////////////////////////////////////////////////////////////////////////////////////
// destruct_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsDestructPixels destruct_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief invokes the destructor on every pixel of an image view

namespace detail {

template <typename Iterator>
BOOST_FORCEINLINE
void destruct_range_impl(Iterator first, Iterator last,
    typename std::enable_if
    <
        mp11::mp_and
        <
            std::is_pointer<Iterator>,
            mp11::mp_not
            <
                detail::is_trivially_destructible<typename std::iterator_traits<Iterator>::value_type>
            >
        >::value
    >::type* /*ptr*/ = 0)
{
    using value_t = typename std::iterator_traits<Iterator>::value_type;
    while (first != last)
    {
        first->~value_t();
        ++first;
    }
}

template <typename Iterator>
BOOST_FORCEINLINE
void destruct_range_impl(Iterator /*first*/, Iterator /*last*/,
    typename std::enable_if
    <
        mp11::mp_or
        <
            mp11::mp_not<std::is_pointer<Iterator>>,
            detail::is_trivially_destructible<typename std::iterator_traits<Iterator>::value_type>
        >::value
    >::type* /* ptr */ = nullptr)
{
}

template <typename Iterator>
BOOST_FORCEINLINE
void destruct_range(Iterator first, Iterator last)
{
    destruct_range_impl(first, last);
}

struct std_destruct_t
{
    template <typename Iterator>
    void operator()(Iterator first, Iterator last) const
    {
        destruct_range(first, last);
    }
};

/// destruct for planar iterators
template <typename It>
BOOST_FORCEINLINE
void destruct_aux(It first, It last, std::true_type)
{
    static_for_each(first, last, std_destruct_t());
}

/// destruct for interleaved iterators
template <typename It>
BOOST_FORCEINLINE
void destruct_aux(It first, It last, std::false_type)
{
    destruct_range(first, last);
}

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsDestructPixels
/// \brief Invokes the in-place destructor on every pixel of the view
template <typename View>
BOOST_FORCEINLINE
void destruct_pixels(View const& view)
{
    if (view.is_1d_traversable())
    {
        detail::destruct_aux(view.begin().x(), view.end().x(), is_planar<View>());
    }
    else
    {
        for (std::ptrdiff_t y = 0; y < view.height(); ++y)
            detail::destruct_aux(view.row_begin(y), view.row_end(y), is_planar<View>());
    }
}
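// Illustrative usage sketch for fill_pixels above (not part of the library):
//
//     boost::gil::rgb8_image_t img(320, 240);
//     boost::gil::fill_pixels(boost::gil::view(img), boost::gil::rgb8_pixel_t(255, 0, 0));
//
// For a 1D-traversable interleaved view this runs a single std::fill over all pixels;
// otherwise it fills row by row.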
//////////////////////////////////////////////////////////////////////////////////////
// uninitialized_fill_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsUninitializedFillPixels uninitialized_fill_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::uninitialized_fill for image views

namespace detail {

/// std::uninitialized_fill for planar iterators
/// If an exception is thrown destructs any in-place copy-constructed objects
template <typename It, typename P>
BOOST_FORCEINLINE
void uninitialized_fill_aux(It first, It last, P const& p, std::true_type)
{
    int channel = 0;
    try
    {
        using pixel_t = typename std::iterator_traits<It>::value_type;
        while (channel < num_channels<pixel_t>::value)
        {
            std::uninitialized_fill(
                dynamic_at_c(first, channel),
                dynamic_at_c(last, channel),
                dynamic_at_c(p, channel));
            ++channel;
        }
    }
    catch (...)
    {
        for (int c = 0; c < channel; ++c)
            destruct_range(dynamic_at_c(first, c), dynamic_at_c(last, c));
        throw;
    }
}

/// std::uninitialized_fill for interleaved iterators
/// If an exception is thrown destructs any in-place copy-constructed objects
template <typename It, typename P>
BOOST_FORCEINLINE
void uninitialized_fill_aux(It first, It last, P const& p, std::false_type)
{
    std::uninitialized_fill(first, last, p);
}

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsUninitializedFillPixels
/// \brief std::uninitialized_fill for image views.
/// Does not support planar heterogeneous views.
/// If an exception is thrown destructs any in-place copy-constructed pixels
template <typename View, typename Value>
void uninitialized_fill_pixels(const View& view, const Value& val)
{
    if (view.is_1d_traversable())
        detail::uninitialized_fill_aux(view.begin().x(), view.end().x(), val, is_planar<View>());
    else
    {
        typename View::y_coord_t y = 0;
        try
        {
            for (y = 0; y < view.height(); ++y)
                detail::uninitialized_fill_aux(view.row_begin(y), view.row_end(y), val, is_planar<View>());
        }
        catch (...)
        {
            for (typename View::y_coord_t y0 = 0; y0 < y; ++y0)
                detail::destruct_aux(view.row_begin(y0), view.row_end(y0), is_planar<View>());
            throw;
        }
    }
}

//////////////////////////////////////////////////////////////////////////////////////
// default_construct_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsDefaultConstructPixels default_construct_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief invokes the default constructor on every pixel of an image view

namespace detail {

template <typename It>
BOOST_FORCEINLINE
void default_construct_range_impl(It first, It last, std::true_type)
{
    It first1 = first;
    try
    {
        using value_t = typename std::iterator_traits<It>::value_type;
        while (first != last)
        {
            new (first) value_t();
            ++first;
        }
    }
    catch (...)
    {
        destruct_range(first1, first);
        throw;
    }
}

template <typename It>
BOOST_FORCEINLINE
void default_construct_range_impl(It, It, std::false_type) {}

template <typename It>
BOOST_FORCEINLINE
void default_construct_range(It first, It last)
{
    default_construct_range_impl(first, last, typename std::is_pointer<It>::type());
}

/// uninitialized_default_construct for planar iterators
template <typename It>
BOOST_FORCEINLINE
void default_construct_aux(It first, It last, std::true_type)
{
    int channel = 0;
    try
    {
        using pixel_t = typename std::iterator_traits<It>::value_type;
        while (channel < num_channels<pixel_t>::value)
        {
            default_construct_range(dynamic_at_c(first, channel), dynamic_at_c(last, channel));
            ++channel;
        }
    }
    catch (...)
    {
        for (int c = 0; c < channel; ++c)
            destruct_range(dynamic_at_c(first, c), dynamic_at_c(last, c));
        throw;
    }
}

/// uninitialized_default_construct for interleaved iterators
template <typename It>
BOOST_FORCEINLINE
void default_construct_aux(It first, It last, std::false_type)
{
    default_construct_range(first, last);
}

template <typename View, bool IsPlanar>
struct has_trivial_pixel_constructor
    : detail::is_trivially_default_constructible<typename View::value_type>
{};

template <typename View>
struct has_trivial_pixel_constructor<View, true>
    : detail::is_trivially_default_constructible<typename channel_type<View>::type>
{};

template <typename View, bool IsTriviallyConstructible>
BOOST_FORCEINLINE
void default_construct_pixels_impl(
    View const& view,
    std::enable_if<!IsTriviallyConstructible>* /*ptr*/ = nullptr)
{
    if (view.is_1d_traversable())
    {
        detail::default_construct_aux(view.begin().x(), view.end().x(), is_planar<View>());
    }
    else
    {
        typename View::y_coord_t y = 0;
        try
        {
            for (y = 0; y < view.height(); ++y)
                detail::default_construct_aux(view.row_begin(y), view.row_end(y), is_planar<View>());
        }
        catch (...)
        {
            for (typename View::y_coord_t y0 = 0; y0 < y; ++y0)
                detail::destruct_aux(view.row_begin(y0), view.row_end(y0), is_planar<View>());
            throw;
        }
    }
}

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsDefaultConstructPixels
/// \brief Invokes the in-place default constructor on every pixel of the (uninitialized) view.
/// Does not support planar heterogeneous views.
/// If an exception is thrown destructs any in-place default-constructed pixels
template <typename View>
void default_construct_pixels(View const& view)
{
    detail::default_construct_pixels_impl
        <
            View,
            detail::has_trivial_pixel_constructor
            <
                View,
                is_planar<View>::value
            >::value
        >(view);
}
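// Illustrative sketch (not part of the library): the construct/destruct algorithms above are
// meant for views over raw pixel storage, e.g. a view created with interleaved_view over a
// caller-owned byte buffer:
//
//     std::ptrdiff_t const w = 64, h = 64;
//     std::vector<unsigned char> buffer(w * h * 3); // raw storage for rgb8 pixels
//     auto v = boost::gil::interleaved_view(
//         w, h, reinterpret_cast<boost::gil::rgb8_pixel_t*>(buffer.data()), w * 3);
//     boost::gil::uninitialized_fill_pixels(v, boost::gil::rgb8_pixel_t(0, 0, 0));
//     // ... use the view ...
//     boost::gil::destruct_pixels(v); // no-op for trivially destructible pixel types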
//////////////////////////////////////////////////////////////////////////////////////
// uninitialized_copy_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsUninitializedCopyPixels uninitialized_copy_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::uninitialized_copy for image views

namespace detail {

/// std::uninitialized_copy for pairs of planar iterators
template <typename It1, typename It2>
BOOST_FORCEINLINE
void uninitialized_copy_aux(It1 first1, It1 last1, It2 first2, std::true_type)
{
    int channel = 0;
    try
    {
        using pixel_t = typename std::iterator_traits<It1>::value_type;
        while (channel < num_channels<pixel_t>::value)
        {
            std::uninitialized_copy(
                dynamic_at_c(first1, channel),
                dynamic_at_c(last1, channel),
                dynamic_at_c(first2, channel));
            ++channel;
        }
    }
    catch (...)
    {
        It2 last2 = first2;
        std::advance(last2, std::distance(first1, last1));
        for (int c = 0; c < channel; ++c)
            destruct_range(dynamic_at_c(first2, c), dynamic_at_c(last2, c));
        throw;
    }
}

/// std::uninitialized_copy for interleaved or mixed iterators
template <typename It1, typename It2>
BOOST_FORCEINLINE
void uninitialized_copy_aux(It1 first1, It1 last1, It2 first2, std::false_type)
{
    std::uninitialized_copy(first1, last1, first2);
}

} // namespace detail

/// \ingroup ImageViewSTLAlgorithmsUninitializedCopyPixels
/// \brief std::uninitialized_copy for image views.
/// Does not support planar heterogeneous views.
/// If an exception is thrown destructs any in-place copy-constructed objects
template <typename View1, typename View2>
void uninitialized_copy_pixels(View1 const& view1, View2 const& view2)
{
    using is_planar = std::integral_constant<bool, is_planar<View1>::value && is_planar<View2>::value>;
    BOOST_ASSERT(view1.dimensions() == view2.dimensions());

    if (view1.is_1d_traversable() && view2.is_1d_traversable())
    {
        detail::uninitialized_copy_aux(
            view1.begin().x(), view1.end().x(), view2.begin().x(), is_planar());
    }
    else
    {
        typename View1::y_coord_t y = 0;
        try
        {
            for (y = 0; y < view1.height(); ++y)
                detail::uninitialized_copy_aux(
                    view1.row_begin(y), view1.row_end(y), view2.row_begin(y), is_planar());
        }
        catch (...)
        {
            for (typename View1::y_coord_t y0 = 0; y0 < y; ++y0)
                detail::destruct_aux(view2.row_begin(y0), view2.row_end(y0), is_planar());
            throw;
        }
    }
}

//////////////////////////////////////////////////////////////////////////////////////
// for_each_pixel
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsForEachPixel for_each_pixel
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::for_each for image views
///
/// For contiguous images (i.e. images that have no alignment gap at the end of each row) it is
/// more efficient to use the underlying pixel iterator that does not check for the end of rows.
/// For non-contiguous images for_each_pixel resolves to for_each of each row using the underlying
/// pixel iterator, which is still faster.

/// \ingroup ImageViewSTLAlgorithmsForEachPixel
template <typename View, typename F>
F for_each_pixel(View const& view, F fun)
{
    if (view.is_1d_traversable())
    {
        return std::for_each(view.begin().x(), view.end().x(), fun);
    }
    else
    {
        for (std::ptrdiff_t y = 0; y < view.height(); ++y)
            std::for_each(view.row_begin(y), view.row_end(y), fun);
        return fun;
    }
}

/// \defgroup ImageViewSTLAlgorithmsForEachPixelPosition for_each_pixel_position
/// \ingroup ImageViewSTLAlgorithms
/// \brief adobe::for_each_position for image views (passes locators, instead of pixel references, to the function object)

/// \ingroup ImageViewSTLAlgorithmsForEachPixelPosition
template <typename View, typename F>
F for_each_pixel_position(View const& view, F fun)
{
    typename View::xy_locator loc = view.xy_at(0, 0);
    for (std::ptrdiff_t y = 0; y < view.height(); ++y)
    {
        for (std::ptrdiff_t x = 0; x < view.width(); ++x, ++loc.x())
            fun(loc);
        loc.x() -= view.width();
        ++loc.y();
    }
    return fun;
}

//////////////////////////////////////////////////////////////////////////////////////
// generate_pixels
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsGeneratePixels generate_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::generate for image views

/// \ingroup ImageViewSTLAlgorithmsGeneratePixels
/// \brief std::generate for image views
template <typename View, typename F>
void generate_pixels(View const& view, F fun)
{
    if (view.is_1d_traversable())
    {
        std::generate(view.begin().x(), view.end().x(), fun);
    }
    else
    {
        for (std::ptrdiff_t y = 0; y < view.height(); ++y)
            std::generate(view.row_begin(y), view.row_end(y), fun);
    }
}
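// Illustrative usage sketch for for_each_pixel / generate_pixels above (not part of the library):
//
//     boost::gil::gray8_image_t img(320, 240);
//     long long sum = 0;
//     boost::gil::for_each_pixel(
//         boost::gil::const_view(img),
//         [&sum](boost::gil::gray8_pixel_t const& p) { sum += p[0]; });
//
//     boost::gil::generate_pixels(
//         boost::gil::view(img),
//         []() { return boost::gil::gray8_pixel_t(128); });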
//////////////////////////////////////////////////////////////////////////////////////
// std::equal and gil::equal_pixels for GIL constructs
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsEqualPixels equal_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::equal for image views

template <typename I1, typename I2>
BOOST_FORCEINLINE
bool equal_n(I1 i1, std::ptrdiff_t n, I2 i2);

namespace detail {

template <typename I1, typename I2>
struct equal_n_fn
{
    BOOST_FORCEINLINE bool operator()(I1 i1, std::ptrdiff_t n, I2 i2) const
    {
        return std::equal(i1, i1 + n, i2);
    }
};

/// Equal when both ranges are interleaved and of the same type.
/// GIL pixels are bitwise comparable, so memcmp is used. User-defined pixels that are not bitwise comparable need to provide an overload
template <typename T, typename CS>
struct equal_n_fn<pixel<T, CS> const*, pixel<T, CS> const*>
{
    BOOST_FORCEINLINE
    bool operator()(pixel<T, CS> const* i1, std::ptrdiff_t n, pixel<T, CS> const* i2) const
    {
        return memcmp(i1, i2, n * sizeof(pixel<T, CS>)) == 0;
    }
};

template <typename T, typename CS>
struct equal_n_fn<pixel<T, CS>*, pixel<T, CS>*>
    : equal_n_fn<pixel<T, CS> const*, pixel<T, CS> const*>
{};

/// EqualPixels
/// Equal when both ranges are planar pointers of the same type. memcmp is invoked for each channel plane
/// User-defined channels that are not bitwise comparable need to provide an overload
template <typename IC, typename CS>
struct equal_n_fn<planar_pixel_iterator<IC, CS>, planar_pixel_iterator<IC, CS>>
{
    BOOST_FORCEINLINE
    bool operator()(planar_pixel_iterator<IC, CS> const i1, std::ptrdiff_t n, planar_pixel_iterator<IC, CS> const i2) const
    {
        // FIXME: ptrdiff_t vs size_t
        std::ptrdiff_t const byte_size = n * sizeof(typename std::iterator_traits<IC>::value_type);
        for (std::ptrdiff_t i = 0; i < mp11::mp_size<CS>::value; ++i)
        {
            if (memcmp(dynamic_at_c(i1, i), dynamic_at_c(i2, i), byte_size) != 0)
                return false;
        }
        return true;
    }
};

/// Source range is delimited by image iterators
/// \tparam Loc Models ConstPixelLocatorConcept
/// \tparam It Models PixelIteratorConcept
template <typename Loc, typename It>
struct equal_n_fn<boost::gil::iterator_from_2d<Loc>, It>
{
    BOOST_FORCEINLINE
    bool operator()(boost::gil::iterator_from_2d<Loc> i1, std::ptrdiff_t n, It i2) const
    {
        gil_function_requires<boost::gil::PixelLocatorConcept<Loc>>();
        gil_function_requires<boost::gil::PixelIteratorConcept<It>>();
        while (n > 0)
        {
            std::ptrdiff_t const num = std::min<std::ptrdiff_t>(n, i1.width() - i1.x_pos());
            if (!equal_n(i1.x(), num, i2))
                return false;
            i1 += num;
            i2 += num;
            n -= num;
        }
        return true;
    }
};

/// Destination range is delimited by image iterators
/// \tparam It Models PixelIteratorConcept
/// \tparam Loc Models PixelLocatorConcept
template <typename It, typename Loc>
struct equal_n_fn<It, boost::gil::iterator_from_2d<Loc>>
{
    BOOST_FORCEINLINE
    bool operator()(It i1, std::ptrdiff_t n, boost::gil::iterator_from_2d<Loc> i2) const
    {
        gil_function_requires<boost::gil::PixelIteratorConcept<It>>();
        gil_function_requires<boost::gil::PixelLocatorConcept<Loc>>();
        while (n > 0)
        {
            std::ptrdiff_t const num = std::min<std::ptrdiff_t>(n, i2.width() - i2.x_pos());
            if (!equal_n(i1, num, i2.x()))
                return false;
            i1 += num;
            i2 += num;
            n -= num;
        }
        return true;
    }
};

/// Both source and destination ranges are delimited by image iterators
template <typename Loc1, typename Loc2>
struct equal_n_fn<boost::gil::iterator_from_2d<Loc1>, boost::gil::iterator_from_2d<Loc2>>
{
    BOOST_FORCEINLINE
    bool operator()(boost::gil::iterator_from_2d<Loc1> i1, std::ptrdiff_t n, boost::gil::iterator_from_2d<Loc2> i2) const
    {
        gil_function_requires<boost::gil::PixelLocatorConcept<Loc1>>();
        gil_function_requires<boost::gil::PixelLocatorConcept<Loc2>>();
        if (i1.x_pos() != i2.x_pos() || i1.width() != i2.width())
        {
            while (n-- > 0)
            {
                if (*i1++ != *i2++)
                    return false;
            }
        }
        while (n > 0)
        {
            std::ptrdiff_t num = std::min<std::ptrdiff_t>(n, i2.width() - i2.x_pos());
            if (!equal_n(i1.x(), num, i2.x()))
                return false;
            i1 += num;
            i2 += num;
            n -= num;
        }
        return true;
    }
};

} // namespace detail

template <typename I1, typename I2>
BOOST_FORCEINLINE
bool equal_n(I1 i1, std::ptrdiff_t n, I2 i2)
{
    return detail::equal_n_fn<I1, I2>()(i1, n, i2);
}

}} // namespace boost::gil
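// Illustrative note (not part of the library): gil::equal_pixels (defined after the std::equal
// overload below) compares two views; for 1D-traversable interleaved views of the same pixel
// type it reduces to a single memcmp via the specializations above. For example:
//
//     boost::gil::rgb8_image_t a(100, 100), b(100, 100);
//     boost::gil::fill_pixels(boost::gil::view(a), boost::gil::rgb8_pixel_t(10, 20, 30));
//     boost::gil::fill_pixels(boost::gil::view(b), boost::gil::rgb8_pixel_t(10, 20, 30));
//     bool const same = boost::gil::equal_pixels(boost::gil::const_view(a), boost::gil::const_view(b)); // true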
namespace std {

/// \ingroup STLOptimizations
/// \brief std::equal(I1,I1,I2) with I1 and I2 being an iterator_from_2d
///
/// Invoked when one calls std::equal(I1,I1,I2) with I1 and I2 being an iterator_from_2d (which is
/// a 1D iterator over the pixels in an image). Attempts to demote the source and destination
/// iterators to simpler/faster types if the corresponding range is contiguous.
/// For contiguous images (i.e. images that have
/// no alignment gap at the end of each row) it is more efficient to use the underlying
/// pixel iterator that does not check for the end of rows. If the underlying pixel iterator
/// happens to be a fundamental planar/interleaved pointer, the call may further resolve
/// to memcmp. Otherwise it resolves to comparing each row using the underlying pixel iterator.
template <typename Loc1, typename Loc2>
BOOST_FORCEINLINE
bool equal(boost::gil::iterator_from_2d<Loc1> first, boost::gil::iterator_from_2d<Loc1> last, boost::gil::iterator_from_2d<Loc2> first2)
{
    boost::gil::gil_function_requires<boost::gil::PixelLocatorConcept<Loc1>>();
    boost::gil::gil_function_requires<boost::gil::PixelLocatorConcept<Loc2>>();
    std::ptrdiff_t n = last - first;
    if (first.is_1d_traversable())
    {
        if (first2.is_1d_traversable())
            return boost::gil::detail::equal_n_fn<typename Loc1::x_iterator, typename Loc2::x_iterator>()(first.x(), n, first2.x());
        else
            return boost::gil::detail::equal_n_fn<typename Loc1::x_iterator, boost::gil::iterator_from_2d<Loc2>>()(first.x(), n, first2);
    }
    else
    {
        if (first2.is_1d_traversable())
            return boost::gil::detail::equal_n_fn<boost::gil::iterator_from_2d<Loc1>, typename Loc2::x_iterator>()(first, n, first2.x());
        else
            return boost::gil::detail::equal_n_fn<boost::gil::iterator_from_2d<Loc1>, boost::gil::iterator_from_2d<Loc2>>()(first, n, first2);
    }
}
} // namespace std

namespace boost { namespace gil {

/// \ingroup ImageViewSTLAlgorithmsEqualPixels
/// \brief std::equal for image views
template <typename View1, typename View2>
BOOST_FORCEINLINE
bool equal_pixels(const View1& v1, const View2& v2)
{
    BOOST_ASSERT(v1.dimensions() == v2.dimensions());
    return std::equal(v1.begin(), v1.end(), v2.begin()); // std::equal has overloads with GIL iterators for optimal performance
}

//////////////////////////////////////////////////////////////////////////////////////
///
/// transform_pixels
///
//////////////////////////////////////////////////////////////////////////////////////

/// \defgroup ImageViewSTLAlgorithmsTransformPixels transform_pixels
/// \ingroup ImageViewSTLAlgorithms
/// \brief std::transform for image views

/// \ingroup ImageViewSTLAlgorithmsTransformPixels
/// \brief std::transform for image views
template <typename View1, typename View2, typename F>
BOOST_FORCEINLINE
F transform_pixels(const View1& src, const View2& dst, F fun)
{
    BOOST_ASSERT(src.dimensions() == dst.dimensions());
    for (std::ptrdiff_t y = 0; y < src.height(); ++y)
    {
        typename View1::x_iterator srcIt = src.row_begin(y);
        typename View2::x_iterator dstIt = dst.row_begin(y);
        for (std::ptrdiff_t x = 0; x < src.width(); ++x)
            dstIt[x] = fun(srcIt[x]);
    }
    return fun;
}

/// \ingroup ImageViewSTLAlgorithmsTransformPixels
/// \brief transform_pixels with two sources
template <typename View1, typename View2, typename View3, typename F>
BOOST_FORCEINLINE
F transform_pixels(const View1& src1, const View2& src2, const View3& dst, F fun)
{
    for (std::ptrdiff_t y = 0; y < dst.height(); ++y)
    {
        typename View1::x_iterator srcIt1 = src1.row_begin(y);
        typename View2::x_iterator srcIt2 = src2.row_begin(y);
        typename View3::x_iterator dstIt = dst.row_begin(y);
        for (std::ptrdiff_t x = 0; x < dst.width(); ++x)
            dstIt[x] = fun(srcIt1[x], srcIt2[x]);
    }
    return fun;
}

/// \defgroup ImageViewSTLAlgorithmsTransformPixelPositions transform_pixel_positions
/// \ingroup ImageViewSTLAlgorithms
/// \brief adobe::transform_positions for image views (passes locators, instead of pixel references, to the function object)

/// \ingroup ImageViewSTLAlgorithmsTransformPixelPositions
/// \brief Like transform_pixels but passes to the function object pixel locators instead of pixel references
template <typename View1, typename View2, typename F>
BOOST_FORCEINLINE
F transform_pixel_positions(const View1& src, const View2& dst, F fun)
{
    BOOST_ASSERT(src.dimensions() == dst.dimensions());
    typename View1::xy_locator loc = src.xy_at(0, 0);
    for (std::ptrdiff_t y = 0; y < src.height(); ++y)
    {
        typename View2::x_iterator dstIt = dst.row_begin(y);
        for (std::ptrdiff_t x = 0; x < src.width(); ++x, ++loc.x())
            dstIt[x] = fun(loc);
        loc.x() -= src.width();
        ++loc.y();
    }
    return fun;
}

/// \ingroup ImageViewSTLAlgorithmsTransformPixelPositions
/// \brief transform_pixel_positions with two sources
template <typename View1, typename View2, typename View3, typename F>
BOOST_FORCEINLINE
F transform_pixel_positions(const View1& src1, const View2& src2, const View3& dst, F fun)
{
    BOOST_ASSERT(src1.dimensions() == dst.dimensions());
    BOOST_ASSERT(src2.dimensions() == dst.dimensions());
    typename View1::xy_locator loc1 = src1.xy_at(0, 0);
    typename View2::xy_locator loc2 = src2.xy_at(0, 0);
    for (std::ptrdiff_t y = 0; y < src1.height(); ++y)
    {
        typename View3::x_iterator dstIt = dst.row_begin(y);
        for (std::ptrdiff_t x = 0; x < src1.width(); ++x, ++loc1.x(), ++loc2.x())
            dstIt[x] = fun(loc1, loc2);
        loc1.x() -= src1.width();
        ++loc1.y();
        loc2.x() -= src2.width();
        ++loc2.y();
    }
    return fun;
}
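// Illustrative usage sketch for transform_pixels / transform_pixel_positions above
// (not part of the library):
//
//     boost::gil::gray8_image_t src(320, 240), dst(320, 240);
//     boost::gil::transform_pixels(
//         boost::gil::const_view(src), boost::gil::view(dst),
//         [](boost::gil::gray8_pixel_t const& p) { return boost::gil::gray8_pixel_t(255 - p[0]); });
//
//     // transform_pixel_positions passes xy locators instead of pixel references, so the
//     // function object can also read neighboring pixels (with appropriate bounds handling):
//     boost::gil::transform_pixel_positions(
//         boost::gil::const_view(src), boost::gil::view(dst),
//         [](boost::gil::gray8c_view_t::xy_locator const& loc) { return *loc; });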