四个简单算法:min_element、max_element、sort、find(示例中还用到了 reverse 对子区间逆序)

本文通过一个C++程序示例介绍了如何使用标准库中的vector容器进行基本操作,包括查找最小元素、最大元素、排序及逆序等,并展示了如何结合<algorithm>头文件中的函数来高效地处理容器数据。

转自:http://hi.baidu.com/btcartoon/blog/item/3d8c87efb53d2c1efdfa3c1d.html

#include <iostream>

#include <vector>

#include <algorithm>

using namespace std;

// Demo of four basic <algorithm> functions on a std::vector<int>:
// min_element, max_element, sort, find — plus reverse on a sub-range.

// Print every element of `v` on one line, each followed by a space.
static void print_all(const std::vector<int>& v)
{
    // size_type (unsigned) avoids the signed/unsigned comparison
    // warning the original `int i < v.size()` loops produced.
    for (std::vector<int>::size_type i = 0; i < v.size(); ++i)
        std::cout << v[i] << " ";
    std::cout << std::endl;
}

int main()
{
    std::vector<int> coll;
    std::vector<int>::iterator pos;

    // Sample data (deliberately unsorted).
    coll.push_back(3);
    coll.push_back(7);
    coll.push_back(1);
    coll.push_back(4);
    coll.push_back(5);

    print_all(coll);

    // min_element / max_element return iterators to the extreme elements;
    // the range is non-empty here, so dereferencing is safe.
    pos = std::min_element(coll.begin(), coll.end());
    std::cout << "min:" << *pos << std::endl;

    pos = std::max_element(coll.begin(), coll.end());
    std::cout << "max:" << *pos << std::endl;

    // Sort ascending, then show the result.
    std::sort(coll.begin(), coll.end());
    std::cout << "after sort:" << std::endl;
    print_all(coll);

    // Find the first 3 and reverse from there to the end.
    // If 3 were absent, find() would return end() and
    // reverse(end, end) is a harmless no-op, so no guard is needed.
    pos = std::find(coll.begin(), coll.end(), 3);
    std::reverse(pos, coll.end());
    std::cout << "after reverse at 3:" << std::endl;
    print_all(coll);

    return 0;
}

// Core algorithmic facilities -*- C++ -*- // Copyright (C) 2001-2014 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /* * * Copyright (c) 1994 * Hewlett-Packard Company * * Permission to use, copy, modify, distribute and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and * that both that copyright notice and this permission notice appear * in supporting documentation. Hewlett-Packard Company makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * * Copyright (c) 1996-1998 * Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, distribute and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and * that both that copyright notice and this permission notice appear * in supporting documentation. 
Silicon Graphics makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. */ /** @file bits/stl_algobase.h * This is an internal header file, included by other library headers. * Do not attempt to use it directly. @headername{algorithm} */ #ifndef _STL_ALGOBASE_H #define _STL_ALGOBASE_H 1 #include <bits/c++config.h> #include <bits/functexcept.h> #include <bits/cpp_type_traits.h> #include <ext/type_traits.h> #include <ext/numeric_traits.h> #include <bits/stl_pair.h> #include <bits/stl_iterator_base_types.h> #include <bits/stl_iterator_base_funcs.h> #include <bits/stl_iterator.h> #include <bits/concept_check.h> #include <debug/debug.h> #include <bits/move.h> // For std::swap and _GLIBCXX_MOVE #include <bits/predefined_ops.h> namespace std _GLIBCXX_VISIBILITY(default) { _GLIBCXX_BEGIN_NAMESPACE_VERSION #if __cplusplus < 201103L // See http://gcc.gnu.org/ml/libstdc++/2004-08/msg00167.html: in a // nutshell, we are partially implementing the resolution of DR 187, // when it's safe, i.e., the value_types are equal. template<bool _BoolType> struct __iter_swap { template<typename _ForwardIterator1, typename _ForwardIterator2> static void iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b) { typedef typename iterator_traits<_ForwardIterator1>::value_type _ValueType1; _ValueType1 __tmp = _GLIBCXX_MOVE(*__a); *__a = _GLIBCXX_MOVE(*__b); *__b = _GLIBCXX_MOVE(__tmp); } }; template<> struct __iter_swap<true> { template<typename _ForwardIterator1, typename _ForwardIterator2> static void iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b) { swap(*__a, *__b); } }; #endif /** * @brief Swaps the contents of two iterators. * @ingroup mutating_algorithms * @param __a An iterator. * @param __b Another iterator. * @return Nothing. * * This function swaps the values pointed to by two iterators, not the * iterators themselves. 
*/ template<typename _ForwardIterator1, typename _ForwardIterator2> inline void iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b) { // concept requirements __glibcxx_function_requires(_Mutable_ForwardIteratorConcept< _ForwardIterator1>) __glibcxx_function_requires(_Mutable_ForwardIteratorConcept< _ForwardIterator2>) #if __cplusplus < 201103L typedef typename iterator_traits<_ForwardIterator1>::value_type _ValueType1; typedef typename iterator_traits<_ForwardIterator2>::value_type _ValueType2; __glibcxx_function_requires(_ConvertibleConcept<_ValueType1, _ValueType2>) __glibcxx_function_requires(_ConvertibleConcept<_ValueType2, _ValueType1>) typedef typename iterator_traits<_ForwardIterator1>::reference _ReferenceType1; typedef typename iterator_traits<_ForwardIterator2>::reference _ReferenceType2; std::__iter_swap<__are_same<_ValueType1, _ValueType2>::__value && __are_same<_ValueType1&, _ReferenceType1>::__value && __are_same<_ValueType2&, _ReferenceType2>::__value>:: iter_swap(__a, __b); #else swap(*__a, *__b); #endif } /** * @brief Swap the elements of two sequences. * @ingroup mutating_algorithms * @param __first1 A forward iterator. * @param __last1 A forward iterator. * @param __first2 A forward iterator. * @return An iterator equal to @p first2+(last1-first1). * * Swaps each element in the range @p [first1,last1) with the * corresponding element in the range @p [first2,(last1-first1)). * The ranges must not overlap. 
*/ template<typename _ForwardIterator1, typename _ForwardIterator2> _ForwardIterator2 swap_ranges(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { // concept requirements __glibcxx_function_requires(_Mutable_ForwardIteratorConcept< _ForwardIterator1>) __glibcxx_function_requires(_Mutable_ForwardIteratorConcept< _ForwardIterator2>) __glibcxx_requires_valid_range(__first1, __last1); for (; __first1 != __last1; ++__first1, ++__first2) std::iter_swap(__first1, __first2); return __first2; } /** * @brief This does what you think it does. * @ingroup sorting_algorithms * @param __a A thing of arbitrary type. * @param __b Another thing of arbitrary type. * @return The lesser of the parameters. * * This is the simple classic generic implementation. It will work on * temporary expressions, since they are only evaluated once, unlike a * preprocessor macro. */ template<typename _Tp> inline const _Tp& min(const _Tp& __a, const _Tp& __b) { // concept requirements __glibcxx_function_requires(_LessThanComparableConcept<_Tp>) //return __b < __a ? __b : __a; if (__b < __a) return __b; return __a; } /** * @brief This does what you think it does. * @ingroup sorting_algorithms * @param __a A thing of arbitrary type. * @param __b Another thing of arbitrary type. * @return The greater of the parameters. * * This is the simple classic generic implementation. It will work on * temporary expressions, since they are only evaluated once, unlike a * preprocessor macro. */ template<typename _Tp> inline const _Tp& max(const _Tp& __a, const _Tp& __b) { // concept requirements __glibcxx_function_requires(_LessThanComparableConcept<_Tp>) //return __a < __b ? __b : __a; if (__a < __b) return __b; return __a; } /** * @brief This does what you think it does. * @ingroup sorting_algorithms * @param __a A thing of arbitrary type. * @param __b Another thing of arbitrary type. * @param __comp A @link comparison_functors comparison functor@endlink. 
* @return The lesser of the parameters. * * This will work on temporary expressions, since they are only evaluated * once, unlike a preprocessor macro. */ template<typename _Tp, typename _Compare> inline const _Tp& min(const _Tp& __a, const _Tp& __b, _Compare __comp) { //return __comp(__b, __a) ? __b : __a; if (__comp(__b, __a)) return __b; return __a; } /** * @brief This does what you think it does. * @ingroup sorting_algorithms * @param __a A thing of arbitrary type. * @param __b Another thing of arbitrary type. * @param __comp A @link comparison_functors comparison functor@endlink. * @return The greater of the parameters. * * This will work on temporary expressions, since they are only evaluated * once, unlike a preprocessor macro. */ template<typename _Tp, typename _Compare> inline const _Tp& max(const _Tp& __a, const _Tp& __b, _Compare __comp) { //return __comp(__a, __b) ? __b : __a; if (__comp(__a, __b)) return __b; return __a; } // If _Iterator is a __normal_iterator return its base (a plain pointer, // normally) otherwise return it untouched. See copy, fill, ... template<typename _Iterator> struct _Niter_base : _Iter_base<_Iterator, __is_normal_iterator<_Iterator>::__value> { }; template<typename _Iterator> inline typename _Niter_base<_Iterator>::iterator_type __niter_base(_Iterator __it) { return std::_Niter_base<_Iterator>::_S_base(__it); } // Likewise, for move_iterator. template<typename _Iterator> struct _Miter_base : _Iter_base<_Iterator, __is_move_iterator<_Iterator>::__value> { }; template<typename _Iterator> inline typename _Miter_base<_Iterator>::iterator_type __miter_base(_Iterator __it) { return std::_Miter_base<_Iterator>::_S_base(__it); } // All of these auxiliary structs serve two purposes. (1) Replace // calls to copy with memmove whenever possible. (Memmove, not memcpy, // because the input and output ranges are permitted to overlap.) // (2) If we're using random access iterators, then write the loop as // a for loop with an explicit count. 
template<bool, bool, typename> struct __copy_move { template<typename _II, typename _OI> static _OI __copy_m(_II __first, _II __last, _OI __result) { for (; __first != __last; ++__result, ++__first) *__result = *__first; return __result; } }; #if __cplusplus >= 201103L template<typename _Category> struct __copy_move<true, false, _Category> { template<typename _II, typename _OI> static _OI __copy_m(_II __first, _II __last, _OI __result) { for (; __first != __last; ++__result, ++__first) *__result = std::move(*__first); return __result; } }; #endif template<> struct __copy_move<false, false, random_access_iterator_tag> { template<typename _II, typename _OI> static _OI __copy_m(_II __first, _II __last, _OI __result) { typedef typename iterator_traits<_II>::difference_type _Distance; for(_Distance __n = __last - __first; __n > 0; --__n) { *__result = *__first; ++__first; ++__result; } return __result; } }; #if __cplusplus >= 201103L template<> struct __copy_move<true, false, random_access_iterator_tag> { template<typename _II, typename _OI> static _OI __copy_m(_II __first, _II __last, _OI __result) { typedef typename iterator_traits<_II>::difference_type _Distance; for(_Distance __n = __last - __first; __n > 0; --__n) { *__result = std::move(*__first); ++__first; ++__result; } return __result; } }; #endif template<bool _IsMove> struct __copy_move<_IsMove, true, random_access_iterator_tag> { template<typename _Tp> static _Tp* __copy_m(const _Tp* __first, const _Tp* __last, _Tp* __result) { #if __cplusplus >= 201103L // trivial types can have deleted assignment static_assert( is_copy_assignable<_Tp>::value, "type is not assignable" ); #endif const ptrdiff_t _Num = __last - __first; if (_Num) __builtin_memmove(__result, __first, sizeof(_Tp) * _Num); return __result + _Num; } }; template<bool _IsMove, typename _II, typename _OI> inline _OI __copy_move_a(_II __first, _II __last, _OI __result) { typedef typename iterator_traits<_II>::value_type _ValueTypeI; typedef typename 
iterator_traits<_OI>::value_type _ValueTypeO; typedef typename iterator_traits<_II>::iterator_category _Category; const bool __simple = (__is_trivial(_ValueTypeI) && __is_pointer<_II>::__value && __is_pointer<_OI>::__value && __are_same<_ValueTypeI, _ValueTypeO>::__value); return std::__copy_move<_IsMove, __simple, _Category>::__copy_m(__first, __last, __result); } // Helpers for streambuf iterators (either istream or ostream). // NB: avoid including <iosfwd>, relatively large. template<typename _CharT> struct char_traits; template<typename _CharT, typename _Traits> class istreambuf_iterator; template<typename _CharT, typename _Traits> class ostreambuf_iterator; template<bool _IsMove, typename _CharT> typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value, ostreambuf_iterator<_CharT, char_traits<_CharT> > >::__type __copy_move_a2(_CharT*, _CharT*, ostreambuf_iterator<_CharT, char_traits<_CharT> >); template<bool _IsMove, typename _CharT> typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value, ostreambuf_iterator<_CharT, char_traits<_CharT> > >::__type __copy_move_a2(const _CharT*, const _CharT*, ostreambuf_iterator<_CharT, char_traits<_CharT> >); template<bool _IsMove, typename _CharT> typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value, _CharT*>::__type __copy_move_a2(istreambuf_iterator<_CharT, char_traits<_CharT> >, istreambuf_iterator<_CharT, char_traits<_CharT> >, _CharT*); template<bool _IsMove, typename _II, typename _OI> inline _OI __copy_move_a2(_II __first, _II __last, _OI __result) { return _OI(std::__copy_move_a<_IsMove>(std::__niter_base(__first), std::__niter_base(__last), std::__niter_base(__result))); } /** * @brief Copies the range [first,last) into result. * @ingroup mutating_algorithms * @param __first An input iterator. * @param __last An input iterator. * @param __result An output iterator. * @return result + (first - last) * * This inline function will boil down to a call to @c memmove whenever * possible. 
Failing that, if random access iterators are passed, then the * loop count will be known (and therefore a candidate for compiler * optimizations such as unrolling). Result may not be contained within * [first,last); the copy_backward function should be used instead. * * Note that the end of the output range is permitted to be contained * within [first,last). */ template<typename _II, typename _OI> inline _OI copy(_II __first, _II __last, _OI __result) { // concept requirements __glibcxx_function_requires(_InputIteratorConcept<_II>) __glibcxx_function_requires(_OutputIteratorConcept<_OI, typename iterator_traits<_II>::value_type>) __glibcxx_requires_valid_range(__first, __last); return (std::__copy_move_a2<__is_move_iterator<_II>::__value> (std::__miter_base(__first), std::__miter_base(__last), __result)); } #if __cplusplus >= 201103L /** * @brief Moves the range [first,last) into result. * @ingroup mutating_algorithms * @param __first An input iterator. * @param __last An input iterator. * @param __result An output iterator. * @return result + (first - last) * * This inline function will boil down to a call to @c memmove whenever * possible. Failing that, if random access iterators are passed, then the * loop count will be known (and therefore a candidate for compiler * optimizations such as unrolling). Result may not be contained within * [first,last); the move_backward function should be used instead. * * Note that the end of the output range is permitted to be contained * within [first,last). 
*/ template<typename _II, typename _OI> inline _OI move(_II __first, _II __last, _OI __result) { // concept requirements __glibcxx_function_requires(_InputIteratorConcept<_II>) __glibcxx_function_requires(_OutputIteratorConcept<_OI, typename iterator_traits<_II>::value_type>) __glibcxx_requires_valid_range(__first, __last); return std::__copy_move_a2<true>(std::__miter_base(__first), std::__miter_base(__last), __result); } #define _GLIBCXX_MOVE3(_Tp, _Up, _Vp) std::move(_Tp, _Up, _Vp) #else #define _GLIBCXX_MOVE3(_Tp, _Up, _Vp) std::copy(_Tp, _Up, _Vp) #endif template<bool, bool, typename> struct __copy_move_backward { template<typename _BI1, typename _BI2> static _BI2 __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result) { while (__first != __last) *--__result = *--__last; return __result; } }; #if __cplusplus >= 201103L template<typename _Category> struct __copy_move_backward<true, false, _Category> { template<typename _BI1, typename _BI2> static _BI2 __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result) { while (__first != __last) *--__result = std::move(*--__last); return __result; } }; #endif template<> struct __copy_move_backward<false, false, random_access_iterator_tag> { template<typename _BI1, typename _BI2> static _BI2 __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result) { typename iterator_traits<_BI1>::difference_type __n; for (__n = __last - __first; __n > 0; --__n) *--__result = *--__last; return __result; } }; #if __cplusplus >= 201103L template<> struct __copy_move_backward<true, false, random_access_iterator_tag> { template<typename _BI1, typename _BI2> static _BI2 __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result) { typename iterator_traits<_BI1>::difference_type __n; for (__n = __last - __first; __n > 0; --__n) *--__result = std::move(*--__last); return __result; } }; #endif template<bool _IsMove> struct __copy_move_backward<_IsMove, true, random_access_iterator_tag> { template<typename _Tp> static _Tp* __copy_move_b(const _Tp* 
__first, const _Tp* __last, _Tp* __result) { #if __cplusplus >= 201103L // trivial types can have deleted assignment static_assert( is_copy_assignable<_Tp>::value, "type is not assignable" ); #endif const ptrdiff_t _Num = __last - __first; if (_Num) __builtin_memmove(__result - _Num, __first, sizeof(_Tp) * _Num); return __result - _Num; } }; template<bool _IsMove, typename _BI1, typename _BI2> inline _BI2 __copy_move_backward_a(_BI1 __first, _BI1 __last, _BI2 __result) { typedef typename iterator_traits<_BI1>::value_type _ValueType1; typedef typename iterator_traits<_BI2>::value_type _ValueType2; typedef typename iterator_traits<_BI1>::iterator_category _Category; const bool __simple = (__is_trivial(_ValueType1) && __is_pointer<_BI1>::__value && __is_pointer<_BI2>::__value && __are_same<_ValueType1, _ValueType2>::__value); return std::__copy_move_backward<_IsMove, __simple, _Category>::__copy_move_b(__first, __last, __result); } template<bool _IsMove, typename _BI1, typename _BI2> inline _BI2 __copy_move_backward_a2(_BI1 __first, _BI1 __last, _BI2 __result) { return _BI2(std::__copy_move_backward_a<_IsMove> (std::__niter_base(__first), std::__niter_base(__last), std::__niter_base(__result))); } /** * @brief Copies the range [first,last) into result. * @ingroup mutating_algorithms * @param __first A bidirectional iterator. * @param __last A bidirectional iterator. * @param __result A bidirectional iterator. * @return result - (first - last) * * The function has the same effect as copy, but starts at the end of the * range and works its way to the start, returning the start of the result. * This inline function will boil down to a call to @c memmove whenever * possible. Failing that, if random access iterators are passed, then the * loop count will be known (and therefore a candidate for compiler * optimizations such as unrolling). * * Result may not be in the range (first,last]. Use copy instead. Note * that the start of the output range may overlap [first,last). 
*/ template<typename _BI1, typename _BI2> inline _BI2 copy_backward(_BI1 __first, _BI1 __last, _BI2 __result) { // concept requirements __glibcxx_function_requires(_BidirectionalIteratorConcept<_BI1>) __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<_BI2>) __glibcxx_function_requires(_ConvertibleConcept< typename iterator_traits<_BI1>::value_type, typename iterator_traits<_BI2>::value_type>) __glibcxx_requires_valid_range(__first, __last); return (std::__copy_move_backward_a2<__is_move_iterator<_BI1>::__value> (std::__miter_base(__first), std::__miter_base(__last), __result)); } #if __cplusplus >= 201103L /** * @brief Moves the range [first,last) into result. * @ingroup mutating_algorithms * @param __first A bidirectional iterator. * @param __last A bidirectional iterator. * @param __result A bidirectional iterator. * @return result - (first - last) * * The function has the same effect as move, but starts at the end of the * range and works its way to the start, returning the start of the result. * This inline function will boil down to a call to @c memmove whenever * possible. Failing that, if random access iterators are passed, then the * loop count will be known (and therefore a candidate for compiler * optimizations such as unrolling). * * Result may not be in the range (first,last]. Use move instead. Note * that the start of the output range may overlap [first,last). 
*/ template<typename _BI1, typename _BI2> inline _BI2 move_backward(_BI1 __first, _BI1 __last, _BI2 __result) { // concept requirements __glibcxx_function_requires(_BidirectionalIteratorConcept<_BI1>) __glibcxx_function_requires(_Mutable_BidirectionalIteratorConcept<_BI2>) __glibcxx_function_requires(_ConvertibleConcept< typename iterator_traits<_BI1>::value_type, typename iterator_traits<_BI2>::value_type>) __glibcxx_requires_valid_range(__first, __last); return std::__copy_move_backward_a2<true>(std::__miter_base(__first), std::__miter_base(__last), __result); } #define _GLIBCXX_MOVE_BACKWARD3(_Tp, _Up, _Vp) std::move_backward(_Tp, _Up, _Vp) #else #define _GLIBCXX_MOVE_BACKWARD3(_Tp, _Up, _Vp) std::copy_backward(_Tp, _Up, _Vp) #endif template<typename _ForwardIterator, typename _Tp> inline typename __gnu_cxx::__enable_if<!__is_scalar<_Tp>::__value, void>::__type __fill_a(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { for (; __first != __last; ++__first) *__first = __value; } template<typename _ForwardIterator, typename _Tp> inline typename __gnu_cxx::__enable_if<__is_scalar<_Tp>::__value, void>::__type __fill_a(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { const _Tp __tmp = __value; for (; __first != __last; ++__first) *__first = __tmp; } // Specialization: for char types we can use memset. template<typename _Tp> inline typename __gnu_cxx::__enable_if<__is_byte<_Tp>::__value, void>::__type __fill_a(_Tp* __first, _Tp* __last, const _Tp& __c) { const _Tp __tmp = __c; __builtin_memset(__first, static_cast<unsigned char>(__tmp), __last - __first); } /** * @brief Fills the range [first,last) with copies of value. * @ingroup mutating_algorithms * @param __first A forward iterator. * @param __last A forward iterator. * @param __value A reference-to-const of arbitrary type. * @return Nothing. * * This function fills a range with copies of the same value. 
For char * types filling contiguous areas of memory, this becomes an inline call * to @c memset or @c wmemset. */ template<typename _ForwardIterator, typename _Tp> inline void fill(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { // concept requirements __glibcxx_function_requires(_Mutable_ForwardIteratorConcept< _ForwardIterator>) __glibcxx_requires_valid_range(__first, __last); std::__fill_a(std::__niter_base(__first), std::__niter_base(__last), __value); } template<typename _OutputIterator, typename _Size, typename _Tp> inline typename __gnu_cxx::__enable_if<!__is_scalar<_Tp>::__value, _OutputIterator>::__type __fill_n_a(_OutputIterator __first, _Size __n, const _Tp& __value) { for (__decltype(__n + 0) __niter = __n; __niter > 0; --__niter, ++__first) *__first = __value; return __first; } template<typename _OutputIterator, typename _Size, typename _Tp> inline typename __gnu_cxx::__enable_if<__is_scalar<_Tp>::__value, _OutputIterator>::__type __fill_n_a(_OutputIterator __first, _Size __n, const _Tp& __value) { const _Tp __tmp = __value; for (__decltype(__n + 0) __niter = __n; __niter > 0; --__niter, ++__first) *__first = __tmp; return __first; } template<typename _Size, typename _Tp> inline typename __gnu_cxx::__enable_if<__is_byte<_Tp>::__value, _Tp*>::__type __fill_n_a(_Tp* __first, _Size __n, const _Tp& __c) { std::__fill_a(__first, __first + __n, __c); return __first + __n; } /** * @brief Fills the range [first,first+n) with copies of value. * @ingroup mutating_algorithms * @param __first An output iterator. * @param __n The count of copies to perform. * @param __value A reference-to-const of arbitrary type. * @return The iterator at first+n. * * This function fills a range with copies of the same value. For char * types filling contiguous areas of memory, this becomes an inline call * to @c memset or @ wmemset. * * _GLIBCXX_RESOLVE_LIB_DEFECTS * DR 865. 
More algorithms that throw away information */ template<typename _OI, typename _Size, typename _Tp> inline _OI fill_n(_OI __first, _Size __n, const _Tp& __value) { // concept requirements __glibcxx_function_requires(_OutputIteratorConcept<_OI, _Tp>) return _OI(std::__fill_n_a(std::__niter_base(__first), __n, __value)); } template<bool _BoolType> struct __equal { template<typename _II1, typename _II2> static bool equal(_II1 __first1, _II1 __last1, _II2 __first2) { for (; __first1 != __last1; ++__first1, ++__first2) if (!(*__first1 == *__first2)) return false; return true; } }; template<> struct __equal<true> { template<typename _Tp> static bool equal(const _Tp* __first1, const _Tp* __last1, const _Tp* __first2) { return !__builtin_memcmp(__first1, __first2, sizeof(_Tp) * (__last1 - __first1)); } }; template<typename _II1, typename _II2> inline bool __equal_aux(_II1 __first1, _II1 __last1, _II2 __first2) { typedef typename iterator_traits<_II1>::value_type _ValueType1; typedef typename iterator_traits<_II2>::value_type _ValueType2; const bool __simple = ((__is_integer<_ValueType1>::__value || __is_pointer<_ValueType1>::__value) && __is_pointer<_II1>::__value && __is_pointer<_II2>::__value && __are_same<_ValueType1, _ValueType2>::__value); return std::__equal<__simple>::equal(__first1, __last1, __first2); } template<typename, typename> struct __lc_rai { template<typename _II1, typename _II2> static _II1 __newlast1(_II1, _II1 __last1, _II2, _II2) { return __last1; } template<typename _II> static bool __cnd2(_II __first, _II __last) { return __first != __last; } }; template<> struct __lc_rai<random_access_iterator_tag, random_access_iterator_tag> { template<typename _RAI1, typename _RAI2> static _RAI1 __newlast1(_RAI1 __first1, _RAI1 __last1, _RAI2 __first2, _RAI2 __last2) { const typename iterator_traits<_RAI1>::difference_type __diff1 = __last1 - __first1; const typename iterator_traits<_RAI2>::difference_type __diff2 = __last2 - __first2; return __diff2 < __diff1 ? 
__first1 + __diff2 : __last1; } template<typename _RAI> static bool __cnd2(_RAI, _RAI) { return true; } }; template<typename _II1, typename _II2, typename _Compare> bool __lexicographical_compare_impl(_II1 __first1, _II1 __last1, _II2 __first2, _II2 __last2, _Compare __comp) { typedef typename iterator_traits<_II1>::iterator_category _Category1; typedef typename iterator_traits<_II2>::iterator_category _Category2; typedef std::__lc_rai<_Category1, _Category2> __rai_type; __last1 = __rai_type::__newlast1(__first1, __last1, __first2, __last2); for (; __first1 != __last1 && __rai_type::__cnd2(__first2, __last2); ++__first1, ++__first2) { if (__comp(__first1, __first2)) return true; if (__comp(__first2, __first1)) return false; } return __first1 == __last1 && __first2 != __last2; } template<bool _BoolType> struct __lexicographical_compare { template<typename _II1, typename _II2> static bool __lc(_II1, _II1, _II2, _II2); }; template<bool _BoolType> template<typename _II1, typename _II2> bool __lexicographical_compare<_BoolType>:: __lc(_II1 __first1, _II1 __last1, _II2 __first2, _II2 __last2) { return std::__lexicographical_compare_impl(__first1, __last1, __first2, __last2, __gnu_cxx::__ops::__iter_less_iter()); } template<> struct __lexicographical_compare<true> { template<typename _Tp, typename _Up> static bool __lc(const _Tp* __first1, const _Tp* __last1, const _Up* __first2, const _Up* __last2) { const size_t __len1 = __last1 - __first1; const size_t __len2 = __last2 - __first2; const int __result = __builtin_memcmp(__first1, __first2, std::min(__len1, __len2)); return __result != 0 ? 
// NOTE(review): the statement below is the tail of the
// std::__lexicographical_compare<> helper struct whose definition begins
// before this chunk; presumably it finishes __lc() by interpreting a
// memcmp-style result and breaking ties on range length — confirm against
// the full header.
__result < 0 : __len1 < __len2; } };

  // Dispatch helper for lexicographical_compare: routes to the
  // memcmp-based specialization only when a raw byte comparison is
  // guaranteed to agree with element-wise operator< (unsigned byte
  // value types accessed through plain pointers).
  template<typename _II1, typename _II2>
    inline bool
    __lexicographical_compare_aux(_II1 __first1, _II1 __last1,
                                  _II2 __first2, _II2 __last2)
    {
      typedef typename iterator_traits<_II1>::value_type _ValueType1;
      typedef typename iterator_traits<_II2>::value_type _ValueType2;

      // True only when memcmp semantics match '<' on the elements.
      const bool __simple =
        (__is_byte<_ValueType1>::__value && __is_byte<_ValueType2>::__value
         && !__gnu_cxx::__numeric_traits<_ValueType1>::__is_signed
         && !__gnu_cxx::__numeric_traits<_ValueType2>::__is_signed
         && __is_pointer<_II1>::__value
         && __is_pointer<_II2>::__value);

      return std::__lexicographical_compare<__simple>::__lc(__first1, __last1,
                                                            __first2, __last2);
    }

  // Implementation of lower_bound: binary search over a forward range
  // via std::distance/std::advance, so it needs only forward iterators
  // (O(log n) comparisons, though iterator movement may be linear).
  // __comp(iterator, value) tests *iterator < value.
  template<typename _ForwardIterator, typename _Tp, typename _Compare>
    _ForwardIterator
    __lower_bound(_ForwardIterator __first, _ForwardIterator __last,
                  const _Tp& __val, _Compare __comp)
    {
      typedef typename iterator_traits<_ForwardIterator>::difference_type
        _DistanceType;

      _DistanceType __len = std::distance(__first, __last);

      while (__len > 0)
        {
          _DistanceType __half = __len >> 1;
          _ForwardIterator __middle = __first;
          std::advance(__middle, __half);
          if (__comp(__middle, __val))
            {
              // Midpoint still compares less than __val: the answer
              // lies strictly to the right of __middle.
              __first = __middle;
              ++__first;
              __len = __len - __half - 1;
            }
          else
            __len = __half;
        }
      return __first;
    }

  /**
   *  @brief Finds the first position in which @a val could be inserted
   *         without changing the ordering.
   *  @param  __first   An iterator.
   *  @param  __last    Another iterator.
   *  @param  __val     The search term.
   *  @return  An iterator pointing to the first element <em>not less
   *           than</em> @a val, or end() if every element is less than
   *           @a val.
   *  @ingroup binary_search_algorithms
  */
  template<typename _ForwardIterator, typename _Tp>
    inline _ForwardIterator
    lower_bound(_ForwardIterator __first, _ForwardIterator __last,
                const _Tp& __val)
    {
      // concept requirements
      __glibcxx_function_requires(_ForwardIteratorConcept<_ForwardIterator>)
      __glibcxx_function_requires(_LessThanOpConcept<
            typename iterator_traits<_ForwardIterator>::value_type, _Tp>)
      __glibcxx_requires_partitioned_lower(__first, __last, __val);

      return std::__lower_bound(__first, __last, __val,
                                __gnu_cxx::__ops::__iter_less_val());
    }

  /// This is a helper function for the sort routines and for random.tcc.
  //  Precondition: __n > 0.
  //  Each overload computes floor(log2(__n)) via the matching
  //  count-leading-zeros builtin for the operand width.
  inline _GLIBCXX_CONSTEXPR int
  __lg(int __n)
  { return sizeof(int) * __CHAR_BIT__ - 1 - __builtin_clz(__n); }

  inline _GLIBCXX_CONSTEXPR unsigned
  __lg(unsigned __n)
  { return sizeof(int) * __CHAR_BIT__ - 1 - __builtin_clz(__n); }

  inline _GLIBCXX_CONSTEXPR long
  __lg(long __n)
  { return sizeof(long) * __CHAR_BIT__ - 1 - __builtin_clzl(__n); }

  inline _GLIBCXX_CONSTEXPR unsigned long
  __lg(unsigned long __n)
  { return sizeof(long) * __CHAR_BIT__ - 1 - __builtin_clzl(__n); }

  inline _GLIBCXX_CONSTEXPR long long
  __lg(long long __n)
  { return sizeof(long long) * __CHAR_BIT__ - 1 - __builtin_clzll(__n); }

  inline _GLIBCXX_CONSTEXPR unsigned long long
  __lg(unsigned long long __n)
  { return sizeof(long long) * __CHAR_BIT__ - 1 - __builtin_clzll(__n); }

_GLIBCXX_END_NAMESPACE_VERSION

_GLIBCXX_BEGIN_NAMESPACE_ALGO

  /**
   *  @brief Tests a range for element-wise equality.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @return   A boolean true or false.
   *
   *  This compares the elements of two ranges using @c == and returns true or
   *  false depending on whether all of the corresponding elements of the
   *  ranges are equal.
  */
  template<typename _II1, typename _II2>
    inline bool
    equal(_II1 __first1, _II1 __last1, _II2 __first2)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_II1>)
      __glibcxx_function_requires(_InputIteratorConcept<_II2>)
      __glibcxx_function_requires(_EqualOpConcept<
            typename iterator_traits<_II1>::value_type,
            typename iterator_traits<_II2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);

      // __niter_base unwraps debug/normal iterator adaptors so the
      // memcmp fast path in __equal_aux can fire for raw pointers.
      return std::__equal_aux(std::__niter_base(__first1),
                              std::__niter_base(__last1),
                              std::__niter_base(__first2));
    }

  /**
   *  @brief Tests a range for element-wise equality.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __binary_pred  A binary predicate @link functors
   *                         functor@endlink.
   *  @return   A boolean true or false.
   *
   *  This compares the elements of two ranges using the binary_pred
   *  parameter, and returns true or
   *  false depending on whether all of the corresponding elements of the
   *  ranges are equal.
  */
  template<typename _IIter1, typename _IIter2, typename _BinaryPredicate>
    inline bool
    equal(_IIter1 __first1, _IIter1 __last1,
          _IIter2 __first2, _BinaryPredicate __binary_pred)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_IIter1>)
      __glibcxx_function_requires(_InputIteratorConcept<_IIter2>)
      __glibcxx_requires_valid_range(__first1, __last1);

      for (; __first1 != __last1; ++__first1, ++__first2)
        if (!bool(__binary_pred(*__first1, *__first2)))
          return false;
      return true;
    }

#if __cplusplus > 201103L
// C++14 feature-test macro for the four-iterator overloads below.
#define __cpp_lib_robust_nonmodifying_seq_ops 201304

  /**
   *  @brief Tests a range for element-wise equality.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @return   A boolean true or false.
   *
   *  This compares the elements of two ranges using @c == and returns true or
   *  false depending on whether all of the corresponding elements of the
   *  ranges are equal.
  */
  template<typename _II1, typename _II2>
    inline bool
    equal(_II1 __first1, _II1 __last1, _II2 __first2, _II2 __last2)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_II1>)
      __glibcxx_function_requires(_InputIteratorConcept<_II2>)
      __glibcxx_function_requires(_EqualOpConcept<
            typename iterator_traits<_II1>::value_type,
            typename iterator_traits<_II2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      // For random-access iterators the lengths are known up front:
      // bail out early on mismatched sizes, then reuse the three-arg
      // overload (which may use the memcmp fast path).
      using _RATag = random_access_iterator_tag;
      using _Cat1 = typename iterator_traits<_II1>::iterator_category;
      using _Cat2 = typename iterator_traits<_II2>::iterator_category;
      using _RAIters = __and_<is_same<_Cat1, _RATag>, is_same<_Cat2, _RATag>>;
      if (_RAIters())
        {
          auto __d1 = std::distance(__first1, __last1);
          auto __d2 = std::distance(__first2, __last2);
          if (__d1 != __d2)
            return false;
          return _GLIBCXX_STD_A::equal(__first1, __last1, __first2);
        }

      // Generic path: walk both ranges in lock-step, stopping at
      // whichever end comes first; equal only if both ended together.
      for (; __first1 != __last1 && __first2 != __last2;
           ++__first1, ++__first2)
        if (!(*__first1 == *__first2))
          return false;
      return __first1 == __last1 && __first2 == __last2;
    }

  /**
   *  @brief Tests a range for element-wise equality.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @param  __binary_pred  A binary predicate @link functors
   *                         functor@endlink.
   *  @return   A boolean true or false.
   *
   *  This compares the elements of two ranges using the binary_pred
   *  parameter, and returns true or
   *  false depending on whether all of the corresponding elements of the
   *  ranges are equal.
  */
  template<typename _IIter1, typename _IIter2, typename _BinaryPredicate>
    inline bool
    equal(_IIter1 __first1, _IIter1 __last1,
          _IIter2 __first2, _IIter2 __last2, _BinaryPredicate __binary_pred)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_IIter1>)
      __glibcxx_function_requires(_InputIteratorConcept<_IIter2>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      // Same random-access shortcut as the operator== overload above.
      using _RATag = random_access_iterator_tag;
      using _Cat1 = typename iterator_traits<_IIter1>::iterator_category;
      using _Cat2 = typename iterator_traits<_IIter2>::iterator_category;
      using _RAIters = __and_<is_same<_Cat1, _RATag>, is_same<_Cat2, _RATag>>;
      if (_RAIters())
        {
          auto __d1 = std::distance(__first1, __last1);
          auto __d2 = std::distance(__first2, __last2);
          if (__d1 != __d2)
            return false;
          return _GLIBCXX_STD_A::equal(__first1, __last1, __first2,
                                       __binary_pred);
        }

      for (; __first1 != __last1 && __first2 != __last2;
           ++__first1, ++__first2)
        if (!bool(__binary_pred(*__first1, *__first2)))
          return false;
      return __first1 == __last1 && __first2 == __last2;
    }
#endif

  /**
   *  @brief Performs @b dictionary comparison on ranges.
   *  @ingroup sorting_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @return   A boolean true or false.
   *
   *  Returns true if the sequence of <em>elements defined by the range
   *  [first1,last1) is lexicographically less than the sequence of elements
   *  defined by the range [first2,last2).  Returns false otherwise.</em>
   *  (Quoted from [25.3.8]/1.)  If the iterators are all character pointers,
   *  then this is an inline call to @c memcmp.
  */
  template<typename _II1, typename _II2>
    inline bool
    lexicographical_compare(_II1 __first1, _II1 __last1,
                            _II2 __first2, _II2 __last2)
    {
#ifdef _GLIBCXX_CONCEPT_CHECKS
      // concept requirements
      typedef typename iterator_traits<_II1>::value_type _ValueType1;
      typedef typename iterator_traits<_II2>::value_type _ValueType2;
#endif
      __glibcxx_function_requires(_InputIteratorConcept<_II1>)
      __glibcxx_function_requires(_InputIteratorConcept<_II2>)
      __glibcxx_function_requires(_LessThanOpConcept<_ValueType1, _ValueType2>)
      __glibcxx_function_requires(_LessThanOpConcept<_ValueType2, _ValueType1>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      // Unwrap iterator adaptors so the aux dispatcher can detect raw
      // byte pointers and use memcmp.
      return std::__lexicographical_compare_aux(std::__niter_base(__first1),
                                                std::__niter_base(__last1),
                                                std::__niter_base(__first2),
                                                std::__niter_base(__last2));
    }

  /**
   *  @brief Performs @b dictionary comparison on ranges.
   *  @ingroup sorting_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @param  __comp  A @link comparison_functors comparison functor@endlink.
   *  @return   A boolean true or false.
   *
   *  The same as the four-parameter @c lexicographical_compare, but uses the
   *  comp parameter instead of @c <.
  */
  template<typename _II1, typename _II2, typename _Compare>
    inline bool
    lexicographical_compare(_II1 __first1, _II1 __last1,
                            _II2 __first2, _II2 __last2, _Compare __comp)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_II1>)
      __glibcxx_function_requires(_InputIteratorConcept<_II2>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      // No memcmp shortcut with a user comparator: go straight to the
      // generic implementation (defined above this chunk).
      return std::__lexicographical_compare_impl
        (__first1, __last1, __first2, __last2,
         __gnu_cxx::__ops::__iter_comp_iter(__comp));
    }

  // Implementation of mismatch: advance both iterators while the
  // predicate (taking iterators, not values) holds; the caller
  // guarantees range 2 is at least as long as range 1.
  template<typename _InputIterator1, typename _InputIterator2,
           typename _BinaryPredicate>
    pair<_InputIterator1, _InputIterator2>
    __mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
               _InputIterator2 __first2, _BinaryPredicate __binary_pred)
    {
      while (__first1 != __last1 && __binary_pred(__first1, __first2))
        {
          ++__first1;
          ++__first2;
        }
      return pair<_InputIterator1, _InputIterator2>(__first1, __first2);
    }

  /**
   *  @brief Finds the places in ranges which don't match.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @return   A pair of iterators pointing to the first mismatch.
   *
   *  This compares the elements of two ranges using @c == and returns a pair
   *  of iterators.  The first iterator points into the first range, the
   *  second iterator points into the second range, and the elements pointed
   *  to by the iterators are not equal.
  */
  template<typename _InputIterator1, typename _InputIterator2>
    inline pair<_InputIterator1, _InputIterator2>
    mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
             _InputIterator2 __first2)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
      __glibcxx_function_requires(_EqualOpConcept<
            typename iterator_traits<_InputIterator1>::value_type,
            typename iterator_traits<_InputIterator2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);

      return _GLIBCXX_STD_A::__mismatch(__first1, __last1, __first2,
                        __gnu_cxx::__ops::__iter_equal_to_iter());
    }

  /**
   *  @brief Finds the places in ranges which don't match.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __binary_pred  A binary predicate @link functors
   *                         functor@endlink.
   *  @return   A pair of iterators pointing to the first mismatch.
   *
   *  This compares the elements of two ranges using the binary_pred
   *  parameter, and returns a pair
   *  of iterators.  The first iterator points into the first range, the
   *  second iterator points into the second range, and the elements pointed
   *  to by the iterators are not equal.
  */
  template<typename _InputIterator1, typename _InputIterator2,
           typename _BinaryPredicate>
    inline pair<_InputIterator1, _InputIterator2>
    mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
             _InputIterator2 __first2, _BinaryPredicate __binary_pred)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
      __glibcxx_requires_valid_range(__first1, __last1);

      return _GLIBCXX_STD_A::__mismatch(__first1, __last1, __first2,
        __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
    }

#if __cplusplus > 201103L
  // C++14 four-iterator variant: also stops when either range ends,
  // so a short second range is safe.
  template<typename _InputIterator1, typename _InputIterator2,
           typename _BinaryPredicate>
    pair<_InputIterator1, _InputIterator2>
    __mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
               _InputIterator2 __first2, _InputIterator2 __last2,
               _BinaryPredicate __binary_pred)
    {
      while (__first1 != __last1 && __first2 != __last2
             && __binary_pred(__first1, __first2))
        {
          ++__first1;
          ++__first2;
        }
      return pair<_InputIterator1, _InputIterator2>(__first1, __first2);
    }

  /**
   *  @brief Finds the places in ranges which don't match.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @return   A pair of iterators pointing to the first mismatch.
   *
   *  This compares the elements of two ranges using @c == and returns a pair
   *  of iterators.  The first iterator points into the first range, the
   *  second iterator points into the second range, and the elements pointed
   *  to by the iterators are not equal.
  */
  template<typename _InputIterator1, typename _InputIterator2>
    inline pair<_InputIterator1, _InputIterator2>
    mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
             _InputIterator2 __first2, _InputIterator2 __last2)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
      __glibcxx_function_requires(_EqualOpConcept<
            typename iterator_traits<_InputIterator1>::value_type,
            typename iterator_traits<_InputIterator2>::value_type>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      return _GLIBCXX_STD_A::__mismatch(__first1, __last1, __first2, __last2,
                        __gnu_cxx::__ops::__iter_equal_to_iter());
    }

  /**
   *  @brief Finds the places in ranges which don't match.
   *  @ingroup non_mutating_algorithms
   *  @param  __first1  An input iterator.
   *  @param  __last1   An input iterator.
   *  @param  __first2  An input iterator.
   *  @param  __last2   An input iterator.
   *  @param  __binary_pred  A binary predicate @link functors
   *                         functor@endlink.
   *  @return   A pair of iterators pointing to the first mismatch.
   *
   *  This compares the elements of two ranges using the binary_pred
   *  parameter, and returns a pair
   *  of iterators.  The first iterator points into the first range, the
   *  second iterator points into the second range, and the elements pointed
   *  to by the iterators are not equal.
  */
  template<typename _InputIterator1, typename _InputIterator2,
           typename _BinaryPredicate>
    inline pair<_InputIterator1, _InputIterator2>
    mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
             _InputIterator2 __first2, _InputIterator2 __last2,
             _BinaryPredicate __binary_pred)
    {
      // concept requirements
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator1>)
      __glibcxx_function_requires(_InputIteratorConcept<_InputIterator2>)
      __glibcxx_requires_valid_range(__first1, __last1);
      __glibcxx_requires_valid_range(__first2, __last2);

      return _GLIBCXX_STD_A::__mismatch(__first1, __last1, __first2, __last2,
        __gnu_cxx::__ops::__iter_comp_iter(__binary_pred));
    }
#endif

_GLIBCXX_END_NAMESPACE_ALGO
} // namespace std

// NB: This file is included within many other C++ includes, as a way
// of getting the base algorithms. So, make sure that parallel bits
// come in too if requested.
#ifdef _GLIBCXX_PARALLEL
# include <parallel/algobase.h>
#endif

#endif
Latest post
12-20
#include <iostream>
#include <vector>
#include <algorithm>
#include <functional> // std::greater
#include <string>

// Demo of common <algorithm> facilities on std::vector / std::string.
// Fixes over the original: removed the unused `words` vector and the
// unused <cctype> include; comments translated to English.
int main() {
    std::vector<int> vec = {5, 2, 8, 2, 9, 1, 5, 5};
    std::vector<int> vec2(8); // destination for transform (same size as vec)

    // 1. sort — ascending order
    std::sort(vec.begin(), vec.end());
    std::cout << "Sorted: ";
    for (int x : vec) std::cout << x << " ";
    std::cout << "\n";

    // 2. reverse — reverse element order in place
    std::reverse(vec.begin(), vec.end());
    std::cout << "Reversed: ";
    for (int x : vec) std::cout << x << " ";
    std::cout << "\n";

    // 3. find — linear search for a value
    auto it = std::find(vec.begin(), vec.end(), 8);
    if (it != vec.end()) {
        std::cout << "Found 8 at index: " << std::distance(vec.begin(), it) << "\n";
    }

    // 4. count — number of occurrences of a value
    int cnt = std::count(vec.begin(), vec.end(), 5);
    std::cout << "Count of 5: " << cnt << "\n";

    // 5. transform — apply a function element-wise (here: square)
    std::transform(vec.begin(), vec.end(), vec2.begin(),
                   [](int x) { return x * x; });
    std::cout << "Transformed (squared): ";
    for (int x : vec2) std::cout << x << " ";
    std::cout << "\n";

    // 6. replace — substitute every occurrence of a value
    std::replace(vec2.begin(), vec2.end(), 25, 100); // every 25 becomes 100
    std::cout << "After replace 25 -> 100: ";
    for (int x : vec2) std::cout << x << " ";
    std::cout << "\n";

    // 7. unique — drop adjacent duplicates (requires sorted input),
    //    then erase the logically-removed tail
    std::sort(vec.begin(), vec.end());
    auto new_end = std::unique(vec.begin(), vec.end());
    vec.erase(new_end, vec.end()); // physically remove the leftovers
    std::cout << "After unique: ";
    for (int x : vec) std::cout << x << " ";
    std::cout << "\n";

    // 8. min_element / max_element — iterators to extreme values
    auto min_it = std::min_element(vec.begin(), vec.end());
    auto max_it = std::max_element(vec.begin(), vec.end());
    std::cout << "Min: " << *min_it << ", Max: " << *max_it << "\n";

    // 9. binary_search — O(log n) membership test (requires sorted range)
    bool found = std::binary_search(vec.begin(), vec.end(), 8);
    std::cout << "Binary search for 8: " << (found ? "found" : "not found") << "\n";

    // 10. lower_bound / upper_bound — first position >= / > value (sorted range)
    auto lb = std::lower_bound(vec.begin(), vec.end(), 5);
    auto ub = std::upper_bound(vec.begin(), vec.end(), 5);
    std::cout << "Lower bound of 5 at index: " << std::distance(vec.begin(), lb) << "\n";
    std::cout << "Upper bound of 5 at index: " << std::distance(vec.begin(), ub) << "\n";

    // 11. next_permutation — enumerate permutations in lexicographic order
    std::string s = "abc";
    do {
        std::cout << s << " ";
    } while (std::next_permutation(s.begin(), s.end()));
    std::cout << "\n";

    // 12. copy — copy a range into a pre-sized destination
    std::vector<int> copy_vec(vec.size());
    std::copy(vec.begin(), vec.end(), copy_vec.begin());
    std::cout << "Copy: ";
    for (int x : copy_vec) std::cout << x << " ";
    std::cout << "\n";

    // 13. fill — assign one value to every element
    std::fill(vec2.begin(), vec2.end(), 0);
    std::cout << "After fill with 0: ";
    for (int x : vec2) std::cout << x << " ";
    std::cout << "\n";

    // 14. equal — element-wise comparison of two ranges
    bool same = std::equal(vec.begin(), vec.end(), copy_vec.begin());
    std::cout << "vec and copy_vec are " << (same ? "equal" : "not equal") << "\n";

    // 15. sort with a custom comparator — descending order
    std::sort(copy_vec.begin(), copy_vec.end(), std::greater<int>());
    std::cout << "Descending order: ";
    for (int x : copy_vec) std::cout << x << " ";
    std::cout << "\n";

    // 16. find_if — first element satisfying a predicate
    auto even_it = std::find_if(copy_vec.begin(), copy_vec.end(),
                                [](int x) { return x % 2 == 0; });
    if (even_it != copy_vec.end()) {
        std::cout << "First even number: " << *even_it << "\n";
    }

    // 17. count_if — count elements satisfying a predicate
    int even_count = std::count_if(copy_vec.begin(), copy_vec.end(),
                                   [](int x) { return x % 2 == 0; });
    std::cout << "Number of even numbers: " << even_count << "\n";

    return 0;
}
10-23
# -*- coding: utf-8 -*-
# Attack-graph minimum-cut enumeration tool.
#
# Pipeline: read attack paths -> build capacitated node graph -> split every
# node into (in, out) halves so node capacities become edge capacities ->
# contract the graph (heavy bicliques, degree-1 chains) -> recursively split
# along critical nodes and connected components -> enumerate all minimum cuts.
#
# NOTE(review): indentation was reconstructed from a whitespace-mangled paste;
# statement grouping in a few places is a best-effort guess (flagged below).
from collections import defaultdict
import networkx as nx
from networkx.algorithms.flow import edmonds_karp, preflow_push
import time
import graphviz
import random
import csv

# Max-flow algorithm selection, one of ['ford_fulkerson', 'edmonds_karp', 'dinic']
ALGORITHM = 'dinic'


# Read the data
def read_paths(file_path):
    """
    Read the file and extract the elements of each path, number the elements,
    and generate the edge set.  Numbering guarantees the path source gets the
    smallest id and the sink the largest.
    """
    # Read all paths; each line looks like "[a, b, c]" -> strip brackets, split.
    with open(file_path, 'r') as file:
        paths = [line.strip()[1:-1].split(", ") for line in file]
    # Number the elements so that sources sort first and sinks last.
    path_elements = set()
    for path in paths:
        path_elements.update(path)
    sorted_elements = sorted(path_elements, key=lambda x: (
        min((i for i, path in enumerate(paths) if x in (path[0], path[-1])), default=len(paths)),
        x
    ))
    numbered_elements = {element: idx for idx, element in enumerate(sorted_elements)}
    # Generate all edges (deduplicated) between consecutive path elements.
    edges = set()
    for path in paths:
        for i in range(1, len(path)):
            start = numbered_elements[path[i - 1]]
            end = numbered_elements[path[i]]
            if (start, end) not in edges:
                edges.add((start, end))
    # Reverse mapping: id -> element name.
    id_to_element = {v: k for k, v in numbered_elements.items()}
    # Return the mapping and the numbered edge set.
    return id_to_element, edges


# Initialization: assign a capacity to every node.
def init_graph(id_to_element, edges):
    # Initialize the random seed (disabled)
    # random.seed(100)
    graph = {'nodes': {},
             'edges': {id_to_element[start]: [id_to_element[end] for end in id_to_element
                                              if (start, end) in edges]
                       for start in id_to_element}}
    for idx, element in id_to_element.items():
        if idx == 0 or idx == 1:
            # ids 0 and 1 are the global source/sink: effectively infinite capacity.
            graph['nodes'][element] = {'capacity': int(1e10)}
        elif any(s in id_to_element[idx] for s in ('Attacker', 'MULTIHOP', 'CONTROL', 'DOS', 'DISPLAYCLOSED', 'ISDISABLED', 'DEVICESTATE', 'OPENCB', 'TRIPCB', 'INJECTCODE')):
            # Attacker / goal-style nodes are not cuttable: infinite capacity.
            graph['nodes'][element] = {'capacity': int(1e10)}
        else:
            # element looks like "(VULN,...,Host)"; split into fields.
            split_node = element[1:-1].split(',')
            # Capacity table (vulnerability, host) -> capacity:
            #   Host0 IIS_BUFFERFLOW 2;  Host1 FTP_RHOST 2;  Host1 SSH_BUFFERFLOW 2
            #   Host1 RSH_LOGIN 3;       Host2 NETBIOS_NULLSESSION 2 (code uses 4 —
            #   NOTE(review): table and code disagree; confirm intended value)
            #   Historian SQUID_PORT_SCAN 5; Historian LICQ_REMOTE_TO_USER 2
            #   Historian LOCAL_SETUID_BUFFERFLOW 2; Pv BRUTEFORCESMA 4
            #   Scada ETERNALBLUEVUL 2; Scada SMBSENSILEAKVUL 2
            #   Splc CODESYSVUL 3; Splc DROPBEARSSHVUL 3; Splc CHANNELHIJACK 5
            #   Cplc CODESYSVUL 3
            if split_node[0] == 'IIS_BUFFERFLOW' and 'Host0' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'FTP_RHOST' and 'Host1' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'SSH_BUFFERFLOW' and 'Host1' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'RSH_LOGIN' and 'Host1' in split_node[2]:
                graph['nodes'][element] = {'capacity': 3}
            elif split_node[0] == 'NETBIOS_NULLSESSION' and 'Host2' in split_node[2]:
                graph['nodes'][element] = {'capacity': 4}
            elif split_node[0] == 'SQUID_PORT_SCAN' and 'Historian' in split_node[2]:
                graph['nodes'][element] = {'capacity': 5}
            elif split_node[0] == 'LICQ_REMOTE_TO_USER' and 'Historian' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'LOCAL_SETUID_BUFFERFLOW' and 'Historian' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'BRUTEFORCESMA' and 'Pv' in split_node[2]:
                graph['nodes'][element] = {'capacity': 4}
            elif split_node[0] == 'ETERNALBLUEVUL' and 'Scada' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'SMBSENSILEAKVUL' and 'Scada' in split_node[2]:
                graph['nodes'][element] = {'capacity': 2}
            elif split_node[0] == 'CODESYSVUL' and 'Splc' in split_node[2]:
                graph['nodes'][element] = {'capacity': 3}
            elif split_node[0] == 'DROPBEARSSHVUL' and 'Splc' in split_node[2]:
                graph['nodes'][element] = {'capacity': 3}
            elif split_node[0] == 'CHANNELHIJACK' and 'Splc' in split_node[2]:
                graph['nodes'][element] = {'capacity': 5}
            elif split_node[0] == 'CODESYSVUL' and 'Cplc' in split_node[2]:
                graph['nodes'][element] = {'capacity': 3}
            else:
                # Unknown vulnerability: random capacity in [1, 5].
                graph['nodes'][element] = {'capacity': random.randint(1, 5)}
    return graph


# Transform the original graph into the auxiliary (node-split) graph:
# node v becomes edge v_in -> v_out with v's capacity, so node cuts
# become edge cuts.
def transform_graph(graph, id_to_element):
    G = nx.DiGraph()
    source = id_to_element[0]
    sink = id_to_element[1]
    for node in graph['nodes']:
        capacity = graph['nodes'][node].get('capacity', int(1e10))
        node_in = f"{node}_in"
        node_out = f"{node}_out"
        G.add_node(node_in)
        G.add_node(node_out)
        # Internal edge carries the node capacity; label records the split pair.
        G.add_edge(node_in, node_out, capacity=capacity, label=f"{node_in}+{node_out}")
    for u in graph['edges']:
        u_out = f"{u}_out"
        for v in graph['edges'][u]:
            v_in = f"{v}_in"
            # Original edges get "infinite" capacity so only node edges are cut.
            G.add_edge(u_out, v_in, capacity=int(1e10), label=f"{u_out}+{v_in}")
    source = f"{source}_in"
    sink = f"{sink}_out"
    return G, source, sink


# (A previous max_flow() wrapper hard-wired to preflow_push lived here,
#  commented out; superseded by the algorithm-selectable version below.)


# Compute max flow -- algorithm-selectable version (default: 'dinic').
def max_flow(transformed_graph, trans_source, trans_sink, algorithm='dinic'):
    """
    Wrap several max-flow algorithms with a selectable backend; same return
    value as nx.maximum_flow.
    :param transformed_graph: the transformed (node-split) graph
    :param trans_source: source node
    :param trans_sink: sink node
    :param algorithm: one of ['ford_fulkerson', 'edmonds_karp', 'dinic']
    :return: (flow_value, flow_dict)
    """
    if algorithm == 'ford_fulkerson':
        # NOTE(review): nx.algorithms.flow.ford_fulkerson was removed in
        # networkx >= 2.0 — confirm the installed version supports it.
        flow_func = nx.algorithms.flow.ford_fulkerson
    elif algorithm == 'edmonds_karp':
        flow_func = nx.algorithms.flow.edmonds_karp
    elif algorithm == 'dinic':
        # NOTE(review): 'dinic' actually maps to preflow_push here, not
        # nx.algorithms.flow.dinitz — confirm this is intentional.
        flow_func = nx.algorithms.flow.preflow_push
    else:
        raise ValueError(f"Unsupported algorithm: {algorithm}")
    return nx.maximum_flow(transformed_graph, trans_source, trans_sink, flow_func=flow_func)


# Enumerate all minimum cuts by DFS over candidate (cuttable) edges.
def all_min_cuts(transformed_graph, max_flow_value, flow_dict, trans_source, trans_sink):
    # Candidate edges: any edge whose capacity could fit in a min cut.
    saturated_edges = []
    for u, v in transformed_graph.edges():
        # if abs(flow_dict[u][v] - transformed_graph[u][v]['capacity']) < 1e-10:  # float tolerance
        #     saturated_edges.append((u, v))
        if transformed_graph[u][v]['capacity'] <= max_flow_value:
            saturated_edges.append((u, v))
    all_cutsets = []
    n = len(saturated_edges)

    def _is_valid_cutset(transformed_graph, cutset, trans_source, trans_sink):
        """Check whether cutset fully disconnects source from sink."""
        G = transformed_graph.copy()
        G.remove_edges_from(cutset)
        try:
            return not nx.has_path(G, trans_source, trans_sink)
        except nx.NetworkXNoPath:
            return True

    def dfs(index, current_cutset, current_capacity):
        # Prune: capacity already exceeds the max-flow value.
        if current_capacity > max_flow_value:
            return
        # Record a valid cut whose total capacity equals the max flow.
        if current_capacity == max_flow_value and _is_valid_cutset(transformed_graph, current_cutset, trans_source, trans_sink):
            all_cutsets.append(set([transformed_graph[edge[0]][edge[1]]['label'] for edge in current_cutset]))
            return
        # All candidate edges processed.
        if index >= n:
            return
        # Branch 1: skip the current edge.
        dfs(index + 1, current_cutset.copy(), current_capacity)
        # Branch 2: take the current edge.
        edge = saturated_edges[index]
        current_cutset.add(edge)
        new_capacity = current_capacity + transformed_graph[edge[0]][edge[1]]['capacity']
        dfs(index + 1, current_cutset, new_capacity)

    # Search from the empty cut.
    dfs(0, set(), 0)
    # Deduplicate cut sets.
    return list(map(set, {tuple(sorted(cutset)) for cutset in all_cutsets}))


def is_heavy_node(G, node, lambda_upper):
    """
    Return True when node sits inside a complete biclique layer whose edges all
    exceed lambda_upper: every predecessor of node must share the same successor
    set, and every such successor must share the same predecessor set.
    Single-in/single-out nodes are rejected here (handled elsewhere).
    """
    # Direct predecessors of node.
    predecessors = list(G.predecessors(node))
    if len(predecessors) <= 1 or len(set(G.successors(predecessors[0]))) <= 1:
        return False
    # Successor set of the first predecessor is the reference.
    ref_successors = set(G.successors(predecessors[0]))
    pred_set = set(predecessors)
    # All predecessors must have identical successor sets.
    for pred in predecessors[1:]:
        if set(G.successors(pred)) != ref_successors:
            return False
    # All those successors must have identical predecessor sets.
    for succ in ref_successors:
        if set(G.predecessors(succ)) != pred_set:
            return False
    # Every cross edge must exceed the threshold.
    return all(G[pred][succ]['capacity'] > lambda_upper
               for pred in predecessors
               for succ in ref_successors)


def contract_heavy_node(G, node, max_node_id):
    """
    Merge node's predecessors and successors into a single new node and remove
    the originals; parallel edge capacities are summed and labels concatenated.
    """
    new_G = G.copy()
    # Direct predecessor / successor sets.
    predecessors = set(G.predecessors(node))
    successors = {succ for succ in G.successors(list(predecessors)[0])}
    # Create the replacement node.
    new_node = f"heavy_{max_node_id}"
    new_G.add_node(new_node)
    # Merge incoming edges (accumulate capacity/label if the edge already exists).
    for pre in predecessors:
        for pre_pre in G.predecessors(pre):
            new_G.add_edge(
                pre_pre, new_node,
                capacity=G[pre_pre][pre]['capacity'] + new_G[pre_pre][new_node]['capacity'] if (pre_pre, new_node) in new_G.edges else G[pre_pre][pre]['capacity'],
                label=f"{G[pre_pre][pre]['label']}+{new_G[pre_pre][new_node]['label']}" if (pre_pre, new_node) in new_G.edges else G[pre_pre][pre]['label']
            )
    # Merge outgoing edges symmetrically.
    for suc in successors:
        for suc_suc in G.successors(suc):
            new_G.add_edge(
                new_node, suc_suc,
                capacity=G[suc][suc_suc]['capacity'] + new_G[new_node][suc_suc]['capacity'] if (new_node, suc_suc) in new_G.edges else G[suc][suc_suc]['capacity'],
                label=f"{G[suc][suc_suc]['label']}+{new_G[new_node][suc_suc]['label']}" if (new_node, suc_suc) in new_G.edges else G[suc][suc_suc]['label']
            )
    # Drop the merged originals.
    nodes_to_remove = predecessors.union(successors)
    # Optionally visualize the removed nodes:
    # draw_graph(new_G, f"images/heavy_{max_node_id}", nodes_to_remove)
    new_G.remove_nodes_from(nodes_to_remove)
    return new_G


def is_one_degree_node(G, node, lambda_upper):
    # A chain node: exactly one incoming and one outgoing edge.
    if G.in_degree(node) == 1 and G.out_degree(node) == 1:
        return True
    return False


def contract_one_degree_node(G, node, lambda_upper):
    # Bypass a degree-1 chain node, keeping the smaller (binding) capacity;
    # when both sides exceed lambda_upper the predecessor edge is kept.
    new_G = G.copy()
    predecessors = list(new_G.predecessors(node))[0]
    successors = list(new_G.successors(node))[0]
    # Compare predecessor-edge vs successor-edge capacity.
    if new_G[predecessors][node]['capacity'] > new_G[node][successors]['capacity']:
        if (predecessors, successors) not in new_G.edges:
            new_G.add_edge(predecessors, successors,
                           capacity=new_G[node][successors]['capacity'],
                           label=new_G[node][successors]['label'])
            new_G.remove_node(node)
        else:
            new_G[predecessors][successors]['capacity'] += new_G[node][successors]['capacity']
            new_G[predecessors][successors]['label'] += "+" + new_G[node][successors]['label']
            new_G.remove_node(node)
    elif new_G[predecessors][node]['capacity'] < new_G[node][successors]['capacity']:
        if (predecessors, successors) not in new_G.edges:
            new_G.add_edge(predecessors, successors,
                           capacity=new_G[predecessors][node]['capacity'],
                           label=new_G[predecessors][node]['label'])
            new_G.remove_node(node)
        else:
            new_G[predecessors][successors]['capacity'] += new_G[predecessors][node]['capacity']
            new_G[predecessors][successors]['label'] += "+" + new_G[predecessors][node]['label']
            new_G.remove_node(node)
    elif new_G[predecessors][node]['capacity'] > lambda_upper and new_G[node][successors]['capacity'] > lambda_upper:
        # Equal capacities, both above threshold: neither edge can be in a min
        # cut, so the node is bypassed with either capacity (predecessor kept).
        if (predecessors, successors) not in new_G.edges:
            new_G.add_edge(predecessors, successors,
                           capacity=new_G[predecessors][node]['capacity'],
                           label=new_G[predecessors][node]['label'])
            new_G.remove_node(node)
        else:
            new_G[predecessors][successors]['capacity'] += new_G[predecessors][node]['capacity']
            new_G[predecessors][successors]['label'] += "+" + new_G[predecessors][node]['label']
            new_G.remove_node(node)
    return new_G


def contract_graph_node(G, lambda_upper, shrink=is_heavy_node):
    # Repeatedly apply the given shrink predicate/contraction until the graph
    # stops shrinking; returns (contracted graph, new source, new sink).
    contracted_G = G.copy()
    i = 0
    max_node_id = contracted_G.number_of_nodes() + 1
    while True:
        old_size = contracted_G.number_of_nodes()
        node_to_delete = []
        # Collect every node eligible for contraction this round.
        for node in contracted_G.nodes:
            if shrink(contracted_G, node, lambda_upper):
                node_to_delete.append(node)
        if not node_to_delete:
            break
        # if shrink == is_heavy_node:
        #     draw_graph(contracted_G, f"images/graph_{i}", node_to_delete)
        # Contract the collected nodes (guard: earlier contractions may have
        # already removed some of them).
        for node in node_to_delete:
            if node in contracted_G:
                if shrink == is_one_degree_node:
                    contracted_G = contract_one_degree_node(contracted_G, node, lambda_upper)
                elif shrink == is_heavy_node:
                    contracted_G = contract_heavy_node(contracted_G, node, max_node_id)
                    max_node_id += 1
        # No change means no further contraction is possible.
        if contracted_G.number_of_nodes() == old_size:
            break
        i += 1
    # Recompute source (in-degree 0) and sink (out-degree 0).
    source = [node for node in contracted_G.nodes if contracted_G.in_degree(node) == 0][0]
    sink = [node for node in contracted_G.nodes if contracted_G.out_degree(node) == 0][0]
    return contracted_G, source, sink


class UnionFind:
    # Minimal union-find with path compression (no union by rank).
    def __init__(self, size):
        self.parent = list(range(size))

    def find(self, x):
        # Path compression: point x directly at its root.
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, x, y):
        fx = self.find(x)
        fy = self.find(y)
        if fx != fy:
            self.parent[fy] = fx


def partition_graph(G, source, sink):
    """
    Partition G into the sub-components left when source/sink are removed,
    then re-attach source and sink to each component.

    Args:
        G: directed graph (networkx.DiGraph)
        source: source node
        sink: sink node
    Returns:
        subgraph_list: list of subgraphs, each containing source and sink
    """
    # Union-find over node indices.
    uf = UnionFind(len(G.nodes))
    node_to_index = {node: idx for idx, node in enumerate(G.nodes)}
    # Union every edge not incident to source/sink.
    for u, v in G.edges():
        u_idx = node_to_index[u]
        v_idx = node_to_index[v]
        # Skip edges touching source or sink.
        if u == source or u == sink or v == source or v == sink:
            continue
        uf.union(u_idx, v_idx)
    # Group nodes by union-find root.
    subgraphs = {}
    for node in G.nodes:
        if node == source or node == sink:
            continue
        root = uf.find(node_to_index[node])
        if root not in subgraphs:
            subgraphs[root] = set()
        subgraphs[root].add(node)
    subgraph_list = []
    # A direct source->sink edge forms its own subgraph (and is removed from G
    # — NOTE(review): this mutates the caller's graph).
    if G.has_edge(source, sink):
        subgraph = G.subgraph([source, sink]).copy()
        G.remove_edge(source, sink)
        subgraph_list.append(subgraph)
    # Re-attach source/sink to every component.
    for nodes in subgraphs.values():
        nodes.update({source, sink})
        subgraph = G.subgraph(nodes).copy()
        subgraph_list.append(subgraph)
    return subgraph_list


def break_cycles(G, source, sink):
    # Remove self-loops, then iteratively remove edges whose endpoints share
    # both a common predecessor and a common successor (mutates G).
    G.remove_edges_from(nx.selfloop_edges(G))
    while True:
        edges = list(G.edges())
        edges_to_remove = []
        # Collect edges to delete this round.
        for edge in edges:
            u, v = edge
            if set(G.predecessors(u)).intersection(set(G.predecessors(v))).difference({u, v}) and \
                    set(G.successors(u)).intersection(set(G.successors(v))).difference({u, v}):
                edges_to_remove.append(edge)
        # Stop when nothing more to delete.
        if not edges_to_remove:
            break
        G.remove_edges_from(edges_to_remove)
    # (A commented-out variant that also removed nodes participating in
    #  2-cycles lived here; omitted for clarity.)
    return G


def identify_critical_nodes(G, source, sink):
    # A node is critical when every source->sink path passes through it:
    # (#paths source->node) * (#paths node->sink) == total #paths.
    # Step 1: topological order of the DAG.
    topological_order = list(nx.topological_sort(G))
    # Step 2: in_count[v] = number of paths source -> v.
    in_count = {node: 0 for node in G.nodes()}
    in_count[source] = 1
    for node in topological_order:
        for successor in G.successors(node):
            in_count[successor] += in_count[node]
    # Step 3: out_count[v] = number of paths v -> sink.
    out_count = {node: 0 for node in G.nodes()}
    out_count[sink] = 1
    for node in reversed(topological_order):
        for predecessor in G.predecessors(node):
            out_count[predecessor] += out_count[node]
    # Step 4: total number of source->sink paths.
    total_paths = in_count[sink]
    # Collect the critical nodes.
    critical_nodes = set()
    for node in G.nodes():
        if in_count[node] * out_count[node] == total_paths:
            critical_nodes.add(node)
    # Step 5: order the critical nodes by topological level.
    critical_nodes = sorted(critical_nodes, key=lambda x: topological_order.index(x))
    return critical_nodes


def partition_graph_by_key_nodes(transformed, key_nodes):
    """
    Horizontally partition the graph along consecutive critical-node pairs and
    keep only the segment(s) with the smallest max flow.
    :param transformed: networkx graph
    :param key_nodes: critical nodes of the graph (topologically ordered)
    :return: list of (subgraph, start_node, end_node) tuples
    """
    subgraphs = []
    # Running minimum of segment max-flow values.
    final_max_flow = int(1e10)
    # Split between each consecutive pair of critical nodes.
    for i in range(len(key_nodes) - 1):
        start_node = key_nodes[i]
        end_node = key_nodes[i + 1]
        # All nodes reachable from start_node...
        successors = nx.descendants(transformed, start_node)
        successors.add(start_node)  # include the start node itself
        # ...intersected with all nodes that reach end_node.
        predecessors = nx.ancestors(transformed, end_node)
        predecessors.add(end_node)  # include the end node itself
        nodes_in_partition = successors & predecessors
        subgraph = transformed.subgraph(nodes_in_partition).copy()
        # Max flow of this segment (selectable algorithm).
        # max_flow = nx.maximum_flow_value(subgraph, start_node, end_node)
        max_flow_value, _ = max_flow(subgraph, start_node, end_node, ALGORITHM)
        # Keep only the minimum-flow segment(s).
        if max_flow_value < final_max_flow:
            subgraphs.clear()
            final_max_flow = max_flow_value
            subgraphs.append((subgraph, start_node, end_node))
        elif max_flow_value == final_max_flow:
            subgraphs.append((subgraph, start_node, end_node))
        # draw_graph(subgraph, "images/subgraph_key_node_{}".format(i))
    return subgraphs


def process_subgraph(graph, source, sink):
    """Recursively process a subgraph; return (all cuts, total count, max flow)."""
    # Identify the critical nodes.
    key_nodes = identify_critical_nodes(graph, source, sink)
    # Base case: <= 2 critical nodes -> enumerate min cuts directly.
    if len(key_nodes) <= 2:
        max_flow_value, flow_dict = max_flow(graph, source, sink, ALGORITHM)
        min_cuts = all_min_cuts(graph, max_flow_value, flow_dict, source, sink)
        return [min_cuts], len(min_cuts), max_flow_value
    # Horizontal split along critical nodes.
    horizontal_subgraphs = partition_graph_by_key_nodes(graph, key_nodes)
    all_cuts = []
    total_number = 0
    max_flow_final = 0
    # Each horizontal segment...
    for h_subgraph, h_source, h_sink in horizontal_subgraphs:
        # ...is split vertically into independent components.
        vertical_subgraphs = partition_graph(h_subgraph, h_source, h_sink)
        sub_cuts = []
        sub_total = 1
        sub_max_flow = 0
        for v_subgraph in vertical_subgraphs:
            # Recurse into each component; cut counts multiply, flows add.
            v_cuts, v_number, v_max_flow = process_subgraph(v_subgraph, h_source, h_sink)
            sub_cuts.append(v_cuts)
            sub_total *= v_number
            sub_max_flow += v_max_flow
        all_cuts.append(sub_cuts)
        total_number += sub_total
        max_flow_final = max(max_flow_final, sub_max_flow)
    return all_cuts, total_number, max_flow_final


def draw_graph(G, output_file="graph", node_to_delete=None):
    """
    Render graph G with graphviz.

    Args:
        G: networkx graph
        output_file: output file name (without extension)
        node_to_delete: optional nodes to highlight in red
    Returns:
        node_mapping: original node -> display name mapping
    """
    # Left-to-right directed layout.
    dot = graphviz.Digraph(comment='Shrunk Graph')
    dot.attr(rankdir='LR')
    # Classify nodes: sources (out-edges only), sinks (in-edges only), middle.
    start_nodes = []    # out-edges only
    end_nodes = []      # in-edges only
    middle_nodes = []   # both
    for node in G.nodes():
        if G.in_degree(node) == 0:
            start_nodes.append(node)
        elif G.out_degree(node) == 0:
            end_nodes.append(node)
        else:
            middle_nodes.append(node)
    # Short display names: s0.., n0.., t0..
    node_mapping = {}
    current_num = 0
    for node in start_nodes:
        node_mapping[node] = f"s{current_num}"
        current_num += 1
    current_num = 0
    for node in middle_nodes:
        node_mapping[node] = f"n{current_num}"
        current_num += 1
    current_num = 0
    for node in end_nodes:
        node_mapping[node] = f"t{current_num}"
        current_num += 1
    # Emit nodes with type-specific shape/color.
    for node in G.nodes():
        new_name = node_mapping[node]
        if node in start_nodes:
            dot.node(new_name, new_name, shape='diamond', color='blue')
        elif node in end_nodes:
            dot.node(new_name, new_name, shape='diamond', color='red')
        else:
            dot.node(new_name, new_name, shape='circle')
    # Emit edges labeled with capacities.
    for u, v in G.edges():
        capacity = G[u][v]['capacity']
        dot.edge(node_mapping[u], node_mapping[v], label=str(capacity), fontsize='10')
    # Highlight nodes scheduled for deletion.
    if node_to_delete:
        for node in node_to_delete:
            dot.node(node_mapping[node], style='filled', color='red')
    # Save the drawing as PDF.
    # dot.render(output_file, format='png', cleanup=True)
    dot.render(output_file, format='pdf', cleanup=True)
    # Return the name mapping for inspection.
    return node_mapping


# (A ~100-line earlier version of solve() was here, fully commented out; it
#  duplicated the logic of the solve() below with inline partitioning instead
#  of process_subgraph(). Removed for readability.)

import time
import networkx as nx


def solve(file_path, t_type, t_num):
    # One full experiment round: build, contract, partition, enumerate cuts.
    # Returns a 16-tuple of counts/timings matching test03()'s CSV header.
    # Read the paths.
    id_to_element, edges = read_paths(file_path)
    # Initialize the capacitated graph.
    graph = init_graph(id_to_element, edges)
    # Draw the original graph.
    original_graph = nx.DiGraph()
    original_graph.add_nodes_from(graph["nodes"])
    for u in graph["edges"]:
        for v in graph["edges"][u]:
            original_graph.add_edge(u, v, capacity=graph["nodes"][u]["capacity"])
    draw_graph(original_graph, f"images/{t_type}/{t_num}/original_graph")
    # Node-split transformation.
    transformed_graph, source, sink = transform_graph(graph, id_to_element)
    draw_graph(transformed_graph, f"images/{t_type}/{t_num}/transformed_graph")
    transformed_node_count = transformed_graph.number_of_nodes()
    transformed_edge_count = transformed_graph.number_of_edges()
    # Max flow on the full transformed graph.
    max_flow_value1, flow_dict1 = max_flow(transformed_graph, source, sink, ALGORITHM)
    time1 = time.time()
    # Baseline full enumeration disabled (too slow on large graphs).
    # origion_min_cuts = all_min_cuts(transformed_graph, max_flow_value1, flow_dict1, source, sink)
    origion_min_cuts = []
    time2 = time.time()
    print("最小割容量:", max_flow_value1)
    print(f"共有{len(origion_min_cuts)}个割集")
    # Heavy-node contraction.
    time3 = time.time()
    transformed_graph, source, sink = contract_graph_node(transformed_graph, max_flow_value1, is_heavy_node)
    time4 = time.time()
    draw_graph(transformed_graph, f"images/{t_type}/{t_num}/heavy_node_graph")
    heavy_node_count = transformed_graph.number_of_nodes()
    heavy_edge_count = transformed_graph.number_of_edges()
    # Max flow after heavy-node contraction (sanity check: should be unchanged).
    max_flow_value2, flow_dict2 = max_flow(transformed_graph, source, sink, ALGORITHM)
    print(f"重点收缩后的最大流与原始最大流的差值:{max_flow_value2 - max_flow_value1}")
    # Degree-1 chain contraction.
    time5 = time.time()
    transformed_graph, source, sink = contract_graph_node(transformed_graph, max_flow_value2, is_one_degree_node)
    time6 = time.time()
    draw_graph(transformed_graph, f"images/{t_type}/{t_num}/one_degree_node_graph")
    one_degree_node_count = transformed_graph.number_of_nodes()
    one_degree_edge_count = transformed_graph.number_of_edges()
    # Max flow after chain contraction (sanity check).
    max_flow_value3, flow_dict3 = max_flow(transformed_graph, source, sink, ALGORITHM)
    print(f"度为1的节点收缩后的最大流与重点收缩后的最大流的差值:{max_flow_value3 - max_flow_value2}")
    # Enumerate all min cuts on the contracted graph.
    time7 = time.time()
    third_min_cuts = all_min_cuts(transformed_graph, max_flow_value3, flow_dict3, source, sink)
    time8 = time.time()
    # Record the timing.
    print(f"收缩后的时间消耗:{time8 - time7}")
    # Identify the critical nodes (timed separately).
    time9 = time.time()
    key_nodes = identify_critical_nodes(transformed_graph, source, sink)
    time10 = time.time()
    # Recursive partition-and-enumerate.
    all_cuts, all_number, max_flow_final = process_subgraph(transformed_graph, source, sink)
    # (An inline horizontal/vertical partition loop equivalent to
    #  process_subgraph() was here, commented out.)
    time11 = time.time()
    print(all_cuts)
    print(f"采用切割后的时间消耗:{time11 - time10}")
    print(f"图中最小割的数量:{all_number}")
    print(f"最大流的值:{max_flow_final}")
    # Round all timings to 3 decimals.
    heavy_node_contraction_time = round(time4 - time3, 3)
    one_degree_node_contraction_time = round(time6 - time5, 3)
    identify_critical_nodes_time = round(time10 - time9, 3)
    origion_min_cuts_time = round(time2 - time1, 3)
    subgraph_cut_time = round(time11 - time10, 3)
    shrink_cut_time = round(time8 - time7, 3)
    return (
        transformed_node_count, transformed_edge_count,
        heavy_node_count, heavy_edge_count,
        one_degree_node_count, one_degree_edge_count,
        heavy_node_contraction_time, one_degree_node_contraction_time,
        identify_critical_nodes_time, origion_min_cuts_time,
        subgraph_cut_time, len(origion_min_cuts), shrink_cut_time,
        all_number, max_flow_value1, max_flow_final
    )


import os

if __name__ == '__main__':
    def test03():
        # Run solve() over the chosen topologies and write one CSV per type.
        # topo_type = ["mesh", "ring", "star"]
        topo_type = ["mesh"]
        topo_num = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
        # topo_num = [0]
        dir_path = "/root/lwq_new_lab5/topo4/{}/{}/paths.txt"
        # Result file directory.
        result_path = "/root/lwq_new_lab5/result/{}_test.csv"
        # One result file per topology type.
        for t_type in topo_type:
            # Create the per-type CSV file.
            fp = open(result_path.format(t_type), "w")
            # CSV writer.
            csv_writer = csv.writer(fp)
            # Header row.
            csv_writer.writerow(
                ["multi", "transformed_node_count", "transformed_edge_count", "heavy_node_count",
                 "heavy_edge_count", "one_degree_node_count", "one_degree_edge_count",
                 "heavy_node_contraction_time", "one_degree_node_contraction_time",
                 "identify_critical_nodes_time", "origion_min_cuts_time", "subgraph_cut_time",
                 "origion_min_cuts_count", "shrink_cut_time", "all_cuts_count",
                 "max_flow_value1", "max_flow_final"])
            for t_num in topo_num:
                result = solve(dir_path.format(t_type, t_num), t_type, t_num)
                csv_writer.writerow([t_num] + list(result))
                # Flush after each row.
                # NOTE(review): flush/close placement reconstructed from a
                # whitespace-mangled paste — confirm close() belongs after the
                # inner loop.
                fp.flush()
            fp.close()

    test03()

# (Original post question, kept for context: "I now want to add a timing
#  section that measures total program execution time per max-flow algorithm,
#  and write the results to separate CSV files in the result/ directory,
#  named by algorithm. How should I modify the code?")
09-15
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值