// redistribute.hpp
  1. // Copyright (C) 2005-2006 The Trustees of Indiana University.
  2. // Use, modification and distribution is subject to the Boost Software
  3. // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
  4. // http://www.boost.org/LICENSE_1_0.txt)
  5. // Authors: Douglas Gregor
  6. // Andrew Lumsdaine
  7. //
  8. // Implements redistribution of vertices for a distributed adjacency
  9. // list. This file should not be included by users. It will be
  10. // included by the distributed adjacency list header.
  11. //
  12. #ifndef BOOST_GRAPH_USE_MPI
  13. #error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
  14. #endif
  15. #include <boost/pending/container_traits.hpp>
  16. namespace boost { namespace detail { namespace parallel {
/* This structure contains a (vertex or edge) descriptor that is being
   moved from one processor to another. It contains the properties for
   that descriptor (if any).
 */
template<typename Descriptor, typename DescriptorProperty>
struct redistributed_descriptor : maybe_store_property<DescriptorProperty>
{
  typedef maybe_store_property<DescriptorProperty> inherited;

  redistributed_descriptor() { }

  redistributed_descriptor(const Descriptor& v, const DescriptorProperty& p)
    : inherited(p), descriptor(v) { }

  // The (vertex or edge) descriptor being migrated; its property, if
  // any, is stored in the maybe_store_property base class.
  Descriptor descriptor;

private:
  friend class boost::serialization::access;

  template<typename Archiver>
  void serialize(Archiver& ar, unsigned int /*version*/)
  {
    // Serialize the (optional) property via the base class, then the
    // descriptor itself. unsafe_serialize presumably bit-copies the
    // descriptor, which is only valid between processes of the same
    // program run -- fine here, since redistribution happens within
    // one MPI job.
    ar & boost::serialization::base_object<inherited>(*this)
       & unsafe_serialize(descriptor);
  }
};
  38. /* Predicate that returns true if the target has migrated. */
  39. template<typename VertexProcessorMap, typename Graph>
  40. struct target_migrated_t
  41. {
  42. typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  43. typedef typename graph_traits<Graph>::edge_descriptor Edge;
  44. target_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
  45. : vertex_to_processor(vertex_to_processor), g(g) { }
  46. bool operator()(Edge e) const
  47. {
  48. typedef global_descriptor<Vertex> DVertex;
  49. processor_id_type owner = get(edge_target_processor_id, g, e);
  50. return get(vertex_to_processor, DVertex(owner, target(e, g))) != owner;
  51. }
  52. private:
  53. VertexProcessorMap vertex_to_processor;
  54. const Graph& g;
  55. };
  56. template<typename VertexProcessorMap, typename Graph>
  57. inline target_migrated_t<VertexProcessorMap, Graph>
  58. target_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
  59. { return target_migrated_t<VertexProcessorMap, Graph>(vertex_to_processor, g); }
  60. /* Predicate that returns true if the source of an in-edge has migrated. */
  61. template<typename VertexProcessorMap, typename Graph>
  62. struct source_migrated_t
  63. {
  64. typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
  65. typedef typename graph_traits<Graph>::edge_descriptor Edge;
  66. source_migrated_t(VertexProcessorMap vertex_to_processor, const Graph& g)
  67. : vertex_to_processor(vertex_to_processor), g(g) { }
  68. bool operator()(stored_in_edge<Edge> e) const
  69. {
  70. return get(vertex_to_processor, DVertex(e.source_processor, source(e.e, g)))
  71. != e.source_processor;
  72. }
  73. private:
  74. VertexProcessorMap vertex_to_processor;
  75. const Graph& g;
  76. };
  77. template<typename VertexProcessorMap, typename Graph>
  78. inline source_migrated_t<VertexProcessorMap, Graph>
  79. source_migrated(VertexProcessorMap vertex_to_processor, const Graph& g)
  80. { return source_migrated_t<VertexProcessorMap, Graph>(vertex_to_processor, g); }
  81. /* Predicate that returns true if the target has migrated. */
  82. template<typename VertexProcessorMap, typename Graph>
  83. struct source_or_target_migrated_t
  84. {
  85. typedef typename graph_traits<Graph>::edge_descriptor Edge;
  86. source_or_target_migrated_t(VertexProcessorMap vertex_to_processor,
  87. const Graph& g)
  88. : vertex_to_processor(vertex_to_processor), g(g) { }
  89. bool operator()(Edge e) const
  90. {
  91. return get(vertex_to_processor, source(e, g)) != source(e, g).owner
  92. || get(vertex_to_processor, target(e, g)) != target(e, g).owner;
  93. }
  94. private:
  95. VertexProcessorMap vertex_to_processor;
  96. const Graph& g;
  97. };
  98. template<typename VertexProcessorMap, typename Graph>
  99. inline source_or_target_migrated_t<VertexProcessorMap, Graph>
  100. source_or_target_migrated(VertexProcessorMap vertex_to_processor,
  101. const Graph& g)
  102. {
  103. typedef source_or_target_migrated_t<VertexProcessorMap, Graph> result_type;
  104. return result_type(vertex_to_processor, g);
  105. }
  106. } } // end of namespace detail::parallel
// Requests, from the vertex->processor map, the (new) location of
// every in-neighbor of v. Bidirectional-graph overload; the requests
// are answered on the next synchronize() of vertex_to_processor.
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::request_in_neighbors(vertex_descriptor v,
                       VertexProcessorMap vertex_to_processor,
                       bidirectionalS)
{
  BGL_FORALL_INEDGES_T(v, e, *this, graph_type)
    request(vertex_to_processor, source(e, *this));
}
// Erases the locally-stored in-edges of v whose source vertex is
// migrating to another processor. Bidirectional-graph overload.
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::remove_migrated_in_edges(vertex_descriptor v,
                           VertexProcessorMap vertex_to_processor,
                           bidirectionalS)
{
  graph_detail::erase_if(get(vertex_in_edges, base())[v.local],
                         source_migrated(vertex_to_processor, base()));
}
// Redistributes the vertices (and, with them, the edges) of this
// distributed graph according to vertex_to_processor, which maps each
// vertex to the process that should own it afterward. This is a
// collective operation: every process in the process group must call
// it, and the matched all-to-all/synchronize steps below must run in
// the same order everywhere.
template<PBGL_DISTRIB_ADJLIST_TEMPLATE_PARMS>
template<typename VertexProcessorMap>
void
PBGL_DISTRIB_ADJLIST_TYPE
::redistribute(VertexProcessorMap vertex_to_processor)
{
  using boost::parallel::inplace_all_to_all;

  // When we have stable descriptors, we only move those descriptors
  // that actually need to be moved. Otherwise, we essentially have to
  // regenerate the entire graph.
  const bool has_stable_descriptors =
    is_same<typename config_type::vertex_list_selector, listS>::value
    || is_same<typename config_type::vertex_list_selector, setS>::value
    || is_same<typename config_type::vertex_list_selector, multisetS>::value;

  // Wire format for migrating a vertex/edge together with its property.
  typedef detail::parallel::redistributed_descriptor<vertex_descriptor,
                                                     vertex_property_type>
    redistributed_vertex;
  typedef detail::parallel::redistributed_descriptor<edge_descriptor,
                                                     edge_property_type>
    redistributed_edge;

  vertex_iterator vi, vi_end;
  edge_iterator ei, ei_end;

  process_group_type pg = process_group();

  // Initial synchronization makes sure that we have all of our ducks
  // in a row. We don't want any outstanding add/remove messages
  // coming in mid-redistribution!
  synchronize(process_group_);

  // We cannot cope with eviction of ghost cells
  vertex_to_processor.set_max_ghost_cells(0);

  process_id_type p = num_processes(pg);

  // Send vertices and edges to the processor where they will
  // actually reside. This requires O(|V| + |E|) communication
  std::vector<std::vector<redistributed_vertex> > redistributed_vertices(p);
  std::vector<std::vector<redistributed_edge> > redistributed_edges(p);

  // Build the sets of relocated vertices for each process and then do
  // an all-to-all transfer.
  for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; ++vi) {
    if (!has_stable_descriptors
        || get(vertex_to_processor, *vi) != vi->owner) {
      redistributed_vertices[get(vertex_to_processor, *vi)]
        .push_back(redistributed_vertex(*vi, get(vertex_all_t(), base(),
                                                 vi->local)));
    }

    // When our descriptors are stable, we need to determine which
    // adjacent descriptors are stable to determine which edges will
    // be removed.
    if (has_stable_descriptors) {
      BGL_FORALL_OUTEDGES_T(*vi, e, *this, graph_type)
        request(vertex_to_processor, target(e, *this));
      request_in_neighbors(*vi, vertex_to_processor, directed_selector());
    }
  }

  inplace_all_to_all(pg, redistributed_vertices);

  // If we have stable descriptors, we need to know where our neighbor
  // vertices are moving.
  if (has_stable_descriptors)
    synchronize(vertex_to_processor);

  // Build the sets of relocated edges for each process and then do
  // an all-to-all transfer. Each migrating edge is sent to the new
  // owner of its *source* vertex.
  for (boost::tie(ei, ei_end) = edges(*this); ei != ei_end; ++ei) {
    vertex_descriptor src = source(*ei, *this);
    vertex_descriptor tgt = target(*ei, *this);
    if (!has_stable_descriptors
        || get(vertex_to_processor, src) != src.owner
        || get(vertex_to_processor, tgt) != tgt.owner)
      redistributed_edges[get(vertex_to_processor, source(*ei, *this))]
        .push_back(redistributed_edge(*ei, split_edge_property(get(edge_all_t(), base(),
                                                                   ei->local))));
  }
  inplace_all_to_all(pg, redistributed_edges);

  // A mapping from old vertex descriptors to new vertex
  // descriptors. This is an STL map partly because I'm too lazy to
  // build a real property map (which is hard in the general case) but
  // also because it won't try to look in the graph itself, because
  // the keys are all vertex descriptors that have been invalidated.
  std::map<vertex_descriptor, vertex_descriptor> old_to_new_vertex_map;

  if (has_stable_descriptors) {
    // Clear out all vertices and edges that will have moved. There
    // are several stages to this.

    // First, eliminate all outgoing edges from the (local) vertices
    // that have been moved or whose targets have been moved.
    BGL_FORALL_VERTICES_T(v, *this, graph_type) {
      if (get(vertex_to_processor, v) != v.owner) {
        // v itself migrates: drop all of its local edges.
        clear_out_edges(v.local, base());
        clear_in_edges_local(v, directed_selector());
      } else {
        // v stays: drop only the edges whose other endpoint migrates.
        remove_out_edge_if(v.local,
                           target_migrated(vertex_to_processor, base()),
                           base());
        remove_migrated_in_edges(v, vertex_to_processor, directed_selector());
      }
    }

    // Next, eliminate locally-stored edges that have migrated (for
    // undirected graphs).
    graph_detail::erase_if(local_edges_,
                           source_or_target_migrated(vertex_to_processor, *this));

    // Eliminate vertices that have migrated
    for (boost::tie(vi, vi_end) = vertices(*this); vi != vi_end; /* in loop */) {
      if (get(vertex_to_processor, *vi) != vi->owner)
        // Post-increment before removal: remove_vertex invalidates
        // the iterator to the erased vertex.
        remove_vertex((*vi++).local, base());
      else {
        // Add the identity relation for vertices that have not migrated
        old_to_new_vertex_map[*vi] = *vi;
        ++vi;
      }
    }
  } else {
    // Clear out the local graph: the entire graph is in transit
    clear();
  }

  // Add the new vertices to the graph. When we do so, update the old
  // -> new vertex mapping both locally and for the owner of the "old"
  // vertex.
  {
    typedef std::pair<vertex_descriptor, vertex_descriptor> mapping_pair;
    std::vector<std::vector<mapping_pair> > mappings(p);

    for (process_id_type src = 0; src < p; ++src) {
      for (typename std::vector<redistributed_vertex>::iterator vi =
             redistributed_vertices[src].begin();
           vi != redistributed_vertices[src].end(); ++vi) {
        vertex_descriptor new_vertex =
            add_vertex(vi->get_property(), *this);
        old_to_new_vertex_map[vi->descriptor] = new_vertex;
        // Tell the old owner where this vertex ended up.
        mappings[vi->descriptor.owner].push_back(mapping_pair(vi->descriptor,
                                                              new_vertex));
      }

      redistributed_vertices[src].clear();
    }

    inplace_all_to_all(pg, mappings);

    // Add the mappings we were sent into the old->new map.
    for (process_id_type src = 0; src < p; ++src)
      old_to_new_vertex_map.insert(mappings[src].begin(), mappings[src].end());
  }

  // Get old->new vertex mappings for all of the vertices we need to
  // know about.

  // TBD: An optimization here might involve sending the
  // request-response pairs without an explicit request step (for
  // bidirectional and undirected graphs). However, it may not matter
  // all that much given the cost of redistribution.
  {
    std::vector<std::vector<vertex_descriptor> > vertex_map_requests(p);
    std::vector<std::vector<vertex_descriptor> > vertex_map_responses(p);

    // We need to know about all of the vertices incident on edges
    // that have been relocated to this processor. Tell each processor
    // what each other processor needs to know.
    for (process_id_type src = 0; src < p; ++src)
      for (typename std::vector<redistributed_edge>::iterator ei =
             redistributed_edges[src].begin();
           ei != redistributed_edges[src].end(); ++ei) {
        vertex_descriptor need_vertex = target(ei->descriptor, *this);
        if (old_to_new_vertex_map.find(need_vertex)
            == old_to_new_vertex_map.end())
          {
            // Placeholder identity mapping; overwritten below once
            // the owner responds with the real new descriptor.
            old_to_new_vertex_map[need_vertex] = need_vertex;
            vertex_map_requests[need_vertex.owner].push_back(need_vertex);
          }
      }
    inplace_all_to_all(pg,
                       vertex_map_requests,
                       vertex_map_responses);

    // Process the requests made for vertices we own. Then perform yet
    // another all-to-all swap. This one matches the requests we've
    // made to the responses we were given.
    for (process_id_type src = 0; src < p; ++src)
      for (typename std::vector<vertex_descriptor>::iterator vi =
             vertex_map_responses[src].begin();
           vi != vertex_map_responses[src].end(); ++vi)
        *vi = old_to_new_vertex_map[*vi];
    inplace_all_to_all(pg, vertex_map_responses);

    // Matching the requests to the responses, update the old->new
    // vertex map for all of the vertices we will need to know.
    // Requests and responses are index-aligned per source process.
    for (process_id_type src = 0; src < p; ++src) {
      typedef typename std::vector<vertex_descriptor>::size_type size_type;
      for (size_type i = 0; i < vertex_map_requests[src].size(); ++i) {
        old_to_new_vertex_map[vertex_map_requests[src][i]] =
          vertex_map_responses[src][i];
      }
    }
  }

  // Add edges to the graph by mapping the source and target.
  for (process_id_type src = 0; src < p; ++src) {
    for (typename std::vector<redistributed_edge>::iterator ei =
           redistributed_edges[src].begin();
         ei != redistributed_edges[src].end(); ++ei) {
      add_edge(old_to_new_vertex_map[source(ei->descriptor, *this)],
               old_to_new_vertex_map[target(ei->descriptor, *this)],
               ei->get_property(),
               *this);
    }

    redistributed_edges[src].clear();
  }

  // Be sure that edge-addition messages are received now, completing
  // the graph.
  synchronize(process_group_);

  // The previous vertex distribution no longer describes the new
  // layout, so discard it.
  this->distribution().clear();

  // Rebuild vertex indices for the (possibly re-created) local
  // vertices, if the graph maintains them.
  detail::parallel::maybe_initialize_vertex_indices(vertices(base()),
                                                    get(vertex_index, base()));
}
  327. } // end namespace boost