shared_ptr -> std::shared_ptr

This is one of the first commits in a plan to remove all `using namespace std;` lines in the entire codebase, as it is considered an anti-pattern today.
This commit is contained in:
void_17
2026-03-02 15:58:20 +07:00
parent d63f79325f
commit 7074f35e4b
1373 changed files with 12054 additions and 12054 deletions

View File

@@ -16,7 +16,7 @@
#include <boost/mpi/config.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <memory>
#include <boost/mpi/datatype.hpp>
#include <utility>
#include <iterator>
@@ -89,7 +89,7 @@ enum comm_create_kind { comm_duplicate, comm_take_ownership, comm_attach };
/**
* INTERNAL ONLY
*
*
* Forward declaration of @c group needed for the @c group
* constructor and accessor.
*/
@@ -436,7 +436,7 @@ class BOOST_MPI_DECL communicator
* @brief Receive an array of values from a remote process.
*
* This routine blocks until it receives an array of values from the
* process @p source with the given @p tag. If the type @c T is
* process @p source with the given @p tag. If the type @c T is
*
* @param source The process that will be sending data. This will
* either be a process rank within the communicator or the
@@ -518,7 +518,7 @@ class BOOST_MPI_DECL communicator
* @c skeleton_proxy objects except that @c isend will not block
* while waiting for the data to be transmitted. Instead, a request
* object will be immediately returned, allowing one to query the
* status of the communication or wait until it has completed.
* status of the communication or wait until it has completed.
*
* The semantics of this routine are equivalent to a non-blocking
* send of a @c packed_skeleton_oarchive storing the skeleton of
@@ -628,7 +628,7 @@ class BOOST_MPI_DECL communicator
* @brief Initiate receipt of an array of values from a remote process.
*
* This routine initiates a receive operation for an array of values
* transmitted by process @p source with the given @p tag.
* transmitted by process @p source with the given @p tag.
*
* @param source The process that will be sending data. This will
* either be a process rank within the communicator or the
@@ -824,9 +824,9 @@ class BOOST_MPI_DECL communicator
#if 0
template<typename Extents>
communicator
with_cartesian_topology(const Extents& extents,
bool periodic = false,
communicator
with_cartesian_topology(const Extents& extents,
bool periodic = false,
bool reorder = false) const;
template<typename DimInputIterator, typename PeriodicInputIterator>
@@ -863,7 +863,7 @@ class BOOST_MPI_DECL communicator
*
* Function object that frees an MPI communicator and deletes the
* memory associated with it. Intended to be used as a deleter with
* shared_ptr.
* std::shared_ptr.
*/
struct comm_free
{
@@ -877,7 +877,7 @@ class BOOST_MPI_DECL communicator
}
};
/**
* INTERNAL ONLY
*
@@ -904,7 +904,7 @@ class BOOST_MPI_DECL communicator
* datatype, so we map directly to that datatype.
*/
template<typename T>
void
void
array_send_impl(int dest, int tag, const T* values, int n, mpl::true_) const;
/**
@@ -915,8 +915,8 @@ class BOOST_MPI_DECL communicator
* data, to be deserialized on the receiver side.
*/
template<typename T>
void
array_send_impl(int dest, int tag, const T* values, int n,
void
array_send_impl(int dest, int tag, const T* values, int n,
mpl::false_) const;
/**
@@ -945,8 +945,8 @@ class BOOST_MPI_DECL communicator
* datatype, so we map directly to that datatype.
*/
template<typename T>
request
array_isend_impl(int dest, int tag, const T* values, int n,
request
array_isend_impl(int dest, int tag, const T* values, int n,
mpl::true_) const;
/**
@@ -957,8 +957,8 @@ class BOOST_MPI_DECL communicator
* data, to be deserialized on the receiver side.
*/
template<typename T>
request
array_isend_impl(int dest, int tag, const T* values, int n,
request
array_isend_impl(int dest, int tag, const T* values, int n,
mpl::false_) const;
/**
@@ -987,7 +987,7 @@ class BOOST_MPI_DECL communicator
* datatype, so we map directly to that datatype.
*/
template<typename T>
status
status
array_recv_impl(int source, int tag, T* values, int n, mpl::true_) const;
/**
@@ -998,7 +998,7 @@ class BOOST_MPI_DECL communicator
* MPI_PACKED. We'll receive it and then deserialize.
*/
template<typename T>
status
status
array_recv_impl(int source, int tag, T* values, int n, mpl::false_) const;
/**
@@ -1027,7 +1027,7 @@ class BOOST_MPI_DECL communicator
* map directly to that datatype.
*/
template<typename T>
request
request
array_irecv_impl(int source, int tag, T* values, int n, mpl::true_) const;
/**
@@ -1038,10 +1038,10 @@ class BOOST_MPI_DECL communicator
* MPI_PACKED. We'll receive it and then deserialize.
*/
template<typename T>
request
request
array_irecv_impl(int source, int tag, T* values, int n, mpl::false_) const;
shared_ptr<MPI_Comm> comm_ptr;
std::shared_ptr<MPI_Comm> comm_ptr;
};
/**
@@ -1070,13 +1070,13 @@ inline bool operator!=(const communicator& comm1, const communicator& comm2)
* Implementation details *
************************************************************************/
// Count elements in a message
template<typename T>
template<typename T>
inline optional<int> status::count() const
{
return count_impl<T>(is_mpi_datatype<T>());
}
template<typename T>
template<typename T>
optional<int> status::count_impl(mpl::true_) const
{
if (m_count != -1)
@@ -1092,7 +1092,7 @@ optional<int> status::count_impl(mpl::true_) const
return m_count = return_value;
}
template<typename T>
template<typename T>
inline optional<int> status::count_impl(mpl::false_) const
{
if (m_count == -1)
@@ -1140,7 +1140,7 @@ communicator::array_send_impl(int dest, int tag, const T* values, int n,
mpl::true_) const
{
BOOST_MPI_CHECK_RESULT(MPI_Send,
(const_cast<T*>(values), n,
(const_cast<T*>(values), n,
get_mpi_datatype<T>(*values),
dest, tag, MPI_Comm(*this)));
}
@@ -1173,7 +1173,7 @@ status communicator::recv_impl(int source, int tag, T& value, mpl::true_) const
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(const_cast<T*>(&value), 1,
(const_cast<T*>(&value), 1,
get_mpi_datatype<T>(value),
source, tag, MPI_Comm(*this), &stat.m_status));
return stat;
@@ -1202,13 +1202,13 @@ status communicator::recv(int source, int tag, T& value) const
}
template<typename T>
status
communicator::array_recv_impl(int source, int tag, T* values, int n,
status
communicator::array_recv_impl(int source, int tag, T* values, int n,
mpl::true_) const
{
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(const_cast<T*>(values), n,
(const_cast<T*>(values), n,
get_mpi_datatype<T>(*values),
source, tag, MPI_Comm(*this), &stat.m_status));
return stat;
@@ -1216,7 +1216,7 @@ communicator::array_recv_impl(int source, int tag, T* values, int n,
template<typename T>
status
communicator::array_recv_impl(int source, int tag, T* values, int n,
communicator::array_recv_impl(int source, int tag, T* values, int n,
mpl::false_) const
{
// Receive the message
@@ -1255,7 +1255,7 @@ communicator::isend_impl(int dest, int tag, const T& value, mpl::true_) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<T*>(&value), 1,
(const_cast<T*>(&value), 1,
get_mpi_datatype<T>(value),
dest, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
@@ -1268,7 +1268,7 @@ template<typename T>
request
communicator::isend_impl(int dest, int tag, const T& value, mpl::false_) const
{
shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
std::shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
*archive << value;
request result = isend(dest, tag, *archive);
result.m_data = archive;
@@ -1290,7 +1290,7 @@ communicator::array_isend_impl(int dest, int tag, const T* values, int n,
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Isend,
(const_cast<T*>(values), n,
(const_cast<T*>(values), n,
get_mpi_datatype<T>(*values),
dest, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
@@ -1298,10 +1298,10 @@ communicator::array_isend_impl(int dest, int tag, const T* values, int n,
template<typename T>
request
communicator::array_isend_impl(int dest, int tag, const T* values, int n,
communicator::array_isend_impl(int dest, int tag, const T* values, int n,
mpl::false_) const
{
shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
std::shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
*archive << n << boost::serialization::make_array(values, n);
request result = isend(dest, tag, *archive);
result.m_data = archive;
@@ -1324,15 +1324,15 @@ namespace detail {
template<typename T>
struct serialized_irecv_data
{
serialized_irecv_data(const communicator& comm, int source, int tag,
serialized_irecv_data(const communicator& comm, int source, int tag,
T& value)
: comm(comm), source(source), tag(tag), ia(comm), value(value)
{
: comm(comm), source(source), tag(tag), ia(comm), value(value)
{
}
void deserialize(status& stat)
{
ia >> value;
void deserialize(status& stat)
{
ia >> value;
stat.m_count = 1;
}
@@ -1347,7 +1347,7 @@ namespace detail {
template<>
struct serialized_irecv_data<packed_iarchive>
{
serialized_irecv_data(const communicator& comm, int source, int tag,
serialized_irecv_data(const communicator& comm, int source, int tag,
packed_iarchive& ia)
: comm(comm), source(source), tag(tag), ia(ia) { }
@@ -1367,10 +1367,10 @@ namespace detail {
template<typename T>
struct serialized_array_irecv_data
{
serialized_array_irecv_data(const communicator& comm, int source, int tag,
serialized_array_irecv_data(const communicator& comm, int source, int tag,
T* values, int n)
: comm(comm), source(source), tag(tag), ia(comm), values(values), n(n)
{
{
}
void deserialize(status& stat);
@@ -1390,26 +1390,26 @@ namespace detail {
// Determine how much data we are going to receive
int count;
ia >> count;
// Deserialize the data in the message
boost::serialization::array<T> arr(values, count > n? n : count);
ia >> arr;
if (count > n) {
boost::throw_exception(
std::range_error("communicator::recv: message receive overflow"));
}
stat.m_count = count;
}
}
template<typename T>
optional<status>
optional<status>
request::handle_serialized_irecv(request* self, request_action action)
{
typedef detail::serialized_irecv_data<T> data_t;
shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
std::shared_ptr<data_t> data = std::static_pointer_cast<data_t>(self->m_data);
if (action == ra_wait) {
status stat;
@@ -1421,7 +1421,7 @@ request::handle_serialized_irecv(request* self, request_action action)
data->ia.resize(data->count);
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(data->ia.address(), data->ia.size(), MPI_PACKED,
stat.source(), stat.tag(),
stat.source(), stat.tag(),
MPI_Comm(data->comm), self->m_requests + 1));
}
@@ -1444,11 +1444,11 @@ request::handle_serialized_irecv(request* self, request_action action)
data->ia.resize(data->count);
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(data->ia.address(), data->ia.size(),MPI_PACKED,
stat.source(), stat.tag(),
stat.source(), stat.tag(),
MPI_Comm(data->comm), self->m_requests + 1));
} else
return optional<status>(); // We have not finished yet
}
}
// Check if we have received the message data
BOOST_MPI_CHECK_RESULT(MPI_Test,
@@ -1456,7 +1456,7 @@ request::handle_serialized_irecv(request* self, request_action action)
if (flag) {
data->deserialize(stat);
return stat;
} else
} else
return optional<status>();
} else {
return optional<status>();
@@ -1464,11 +1464,11 @@ request::handle_serialized_irecv(request* self, request_action action)
}
template<typename T>
optional<status>
optional<status>
request::handle_serialized_array_irecv(request* self, request_action action)
{
typedef detail::serialized_array_irecv_data<T> data_t;
shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
std::shared_ptr<data_t> data = std::static_pointer_cast<data_t>(self->m_data);
if (action == ra_wait) {
status stat;
@@ -1480,7 +1480,7 @@ request::handle_serialized_array_irecv(request* self, request_action action)
data->ia.resize(data->count);
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(data->ia.address(), data->ia.size(), MPI_PACKED,
stat.source(), stat.tag(),
stat.source(), stat.tag(),
MPI_Comm(data->comm), self->m_requests + 1));
}
@@ -1503,11 +1503,11 @@ request::handle_serialized_array_irecv(request* self, request_action action)
data->ia.resize(data->count);
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(data->ia.address(), data->ia.size(),MPI_PACKED,
stat.source(), stat.tag(),
stat.source(), stat.tag(),
MPI_Comm(data->comm), self->m_requests + 1));
} else
return optional<status>(); // We have not finished yet
}
}
// Check if we have received the message data
BOOST_MPI_CHECK_RESULT(MPI_Test,
@@ -1515,7 +1515,7 @@ request::handle_serialized_array_irecv(request* self, request_action action)
if (flag) {
data->deserialize(stat);
return stat;
} else
} else
return optional<status>();
} else {
return optional<status>();
@@ -1525,12 +1525,12 @@ request::handle_serialized_array_irecv(request* self, request_action action)
// We're receiving a type that has an associated MPI datatype, so we
// map directly to that datatype.
template<typename T>
request
request
communicator::irecv_impl(int source, int tag, T& value, mpl::true_) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(const_cast<T*>(&value), 1,
(const_cast<T*>(&value), 1,
get_mpi_datatype<T>(value),
source, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
@@ -1541,34 +1541,34 @@ request
communicator::irecv_impl(int source, int tag, T& value, mpl::false_) const
{
typedef detail::serialized_irecv_data<T> data_t;
shared_ptr<data_t> data(new data_t(*this, source, tag, value));
std::shared_ptr<data_t> data(new data_t(*this, source, tag, value));
request req;
req.m_data = data;
req.m_handler = request::handle_serialized_irecv<T>;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(&data->count, 1,
(&data->count, 1,
get_mpi_datatype<std::size_t>(data->count),
source, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
}
template<typename T>
request
request
communicator::irecv(int source, int tag, T& value) const
{
return this->irecv_impl(source, tag, value, is_mpi_datatype<T>());
}
template<typename T>
request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
mpl::true_) const
{
request req;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(const_cast<T*>(values), n,
(const_cast<T*>(values), n,
get_mpi_datatype<T>(*values),
source, tag, MPI_Comm(*this), &req.m_requests[0]));
return req;
@@ -1576,17 +1576,17 @@ communicator::array_irecv_impl(int source, int tag, T* values, int n,
template<typename T>
request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
communicator::array_irecv_impl(int source, int tag, T* values, int n,
mpl::false_) const
{
typedef detail::serialized_array_irecv_data<T> data_t;
shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
std::shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
request req;
req.m_data = data;
req.m_handler = request::handle_serialized_array_irecv<T>;
BOOST_MPI_CHECK_RESULT(MPI_Irecv,
(&data->count, 1,
(&data->count, 1,
get_mpi_datatype<std::size_t>(data->count),
source, tag, MPI_Comm(*this), &req.m_requests[0]));
@@ -1657,7 +1657,7 @@ communicator::recv<content>(int source, int tag,
content& c) const
{
return recv<const content>(source,tag,c);
}
}
/**
* INTERNAL ONLY

View File

@@ -45,7 +45,7 @@ template<typename T>
request
communicator::isend(int dest, int tag, const skeleton_proxy<T>& proxy) const
{
shared_ptr<packed_skeleton_oarchive>
std::shared_ptr<packed_skeleton_oarchive>
archive(new packed_skeleton_oarchive(*this));
*archive << proxy.object;
@@ -58,13 +58,13 @@ namespace detail {
template<typename T>
struct serialized_irecv_data<const skeleton_proxy<T> >
{
serialized_irecv_data(const communicator& comm, int source, int tag,
serialized_irecv_data(const communicator& comm, int source, int tag,
skeleton_proxy<T> proxy)
: comm(comm), source(source), tag(tag), isa(comm),
: comm(comm), source(source), tag(tag), isa(comm),
ia(isa.get_skeleton()), proxy(proxy) { }
void deserialize(status& stat)
{
void deserialize(status& stat)
{
isa >> proxy.object;
stat.m_count = 1;
}
@@ -84,7 +84,7 @@ namespace detail {
{
typedef serialized_irecv_data<const skeleton_proxy<T> > inherited;
serialized_irecv_data(const communicator& comm, int source, int tag,
serialized_irecv_data(const communicator& comm, int source, int tag,
const skeleton_proxy<T>& proxy)
: inherited(comm, source, tag, proxy) { }
};

View File

@@ -59,7 +59,7 @@ class BOOST_MPI_DECL graph_communicator : public communicator
* underlying MPI_Comm. This operation is used for "casting" from a
* communicator to a graph communicator.
*/
explicit graph_communicator(const shared_ptr<MPI_Comm>& comm_ptr)
explicit graph_communicator(const std::shared_ptr<MPI_Comm>& comm_ptr)
{
#ifndef BOOST_DISABLE_ASSERTS
int status;
@@ -99,7 +99,7 @@ public:
*/
graph_communicator(const MPI_Comm& comm, comm_create_kind kind)
: communicator(comm, kind)
{
{
#ifndef BOOST_DISABLE_ASSERTS
int status;
BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status));
@@ -116,8 +116,8 @@ public:
* resulting communicator will be a NULL communicator.
*
* @param comm The communicator that the new, graph communicator
* will be based on.
*
* will be based on.
*
* @param graph Any type that meets the requirements of the
* Incidence Graph and Vertex List Graph concepts from the Boost Graph
* Library. This structure of this graph will become the topology
@@ -130,8 +130,8 @@ public:
* within the original communicator.
*/
template<typename Graph>
explicit
graph_communicator(const communicator& comm, const Graph& graph,
explicit
graph_communicator(const communicator& comm, const Graph& graph,
bool reorder = false);
/**
@@ -145,7 +145,7 @@ public:
* @param comm The communicator that the new, graph communicator
* will be based on. The ranks in @c rank refer to the processes in
* this communicator.
*
*
* @param graph Any type that meets the requirements of the
* Incidence Graph and Vertex List Graph concepts from the Boost Graph
* Library. This structure of this graph will become the topology
@@ -164,8 +164,8 @@ public:
* within the original communicator.
*/
template<typename Graph, typename RankMap>
explicit
graph_communicator(const communicator& comm, const Graph& graph,
explicit
graph_communicator(const communicator& comm, const Graph& graph,
RankMap rank, bool reorder = false);
protected:
@@ -177,7 +177,7 @@ protected:
*/
template<typename Graph, typename RankMap>
void
setup_graph(const communicator& comm, const Graph& graph, RankMap rank,
setup_graph(const communicator& comm, const Graph& graph, RankMap rank,
bool reorder);
};
@@ -186,16 +186,16 @@ protected:
****************************************************************************/
template<typename Graph>
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
bool reorder)
{
this->setup_graph(comm, graph, get(vertex_index, graph), reorder);
}
template<typename Graph, typename RankMap>
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
graph_communicator::graph_communicator(const communicator& comm,
const Graph& graph,
RankMap rank, bool reorder)
{
this->setup_graph(comm, graph, rank, reorder);
@@ -204,7 +204,7 @@ graph_communicator::graph_communicator(const communicator& comm,
template<typename Graph, typename RankMap>
void
graph_communicator::setup_graph(const communicator& comm, const Graph& graph,
graph_communicator::setup_graph(const communicator& comm, const Graph& graph,
RankMap rank, bool reorder)
{
typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
@@ -234,7 +234,7 @@ graph_communicator::setup_graph(const communicator& comm, const Graph& graph,
// Create the new communicator
MPI_Comm newcomm;
BOOST_MPI_CHECK_RESULT(MPI_Graph_create,
((MPI_Comm)comm,
((MPI_Comm)comm,
nvertices,
&indices[0],
edges.empty()? (int*)0 : &edges[0],
@@ -254,10 +254,10 @@ namespace detail {
* communicator's graph topology.
*/
class comm_out_edge_iterator
: public iterator_facade<comm_out_edge_iterator,
: public iterator_facade<comm_out_edge_iterator,
std::pair<int, int>,
random_access_traversal_tag,
const std::pair<int, int>&,
const std::pair<int, int>&,
int>
{
public:
@@ -304,10 +304,10 @@ namespace detail {
* communicator's graph topology.
*/
class comm_adj_iterator
: public iterator_facade<comm_adj_iterator,
: public iterator_facade<comm_adj_iterator,
int,
random_access_traversal_tag,
int,
int,
int>
{
public:
@@ -349,10 +349,10 @@ namespace detail {
* topology.
*/
class comm_edge_iterator
: public iterator_facade<comm_edge_iterator,
: public iterator_facade<comm_edge_iterator,
std::pair<int, int>,
forward_traversal_tag,
const std::pair<int, int>&,
const std::pair<int, int>&,
int>
{
public:
@@ -381,9 +381,9 @@ namespace detail {
return edge_index == other.edge_index;
}
void increment()
{
++edge_index;
void increment()
{
++edge_index;
}
shared_array<int> indices;
@@ -478,7 +478,7 @@ int num_edges(const graph_communicator& comm);
/**
* @brief Returns a property map that maps from vertices in a
* communicator's graph topology to their index values.
* communicator's graph topology to their index values.
*
* Since the vertices are ranks in the communicator, the returned
* property map is the identity property map.
@@ -522,16 +522,16 @@ struct graph_traits<mpi::graph_communicator> {
typedef std::pair<int, int> edge_descriptor;
typedef directed_tag directed_category;
typedef disallow_parallel_edge_tag edge_parallel_category;
/**
* INTERNAL ONLY
*/
struct traversal_category
: incidence_graph_tag,
adjacency_graph_tag,
vertex_list_graph_tag,
edge_list_graph_tag
{
: incidence_graph_tag,
adjacency_graph_tag,
vertex_list_graph_tag,
edge_list_graph_tag
{
};
/**

View File

@@ -16,7 +16,7 @@
#define BOOST_MPI_GROUP_HPP
#include <boost/mpi/exception.hpp>
#include <boost/shared_ptr.hpp>
#include <memory>
#include <boost/optional.hpp>
#include <vector>
@@ -62,7 +62,7 @@ public:
/**
* @brief Determine the rank of the calling process in the group.
*
*
* This routine is equivalent to @c MPI_Group_rank.
*
* @returns The rank of the calling process in the group, which will
@@ -167,11 +167,11 @@ public:
*
* @returns A new group containing all of the processes in the
* current group except those processes with ranks @c [first, last)
* in the current group.
* in the current group.
*/
template<typename InputIterator>
group exclude(InputIterator first, InputIterator last);
protected:
/**
@@ -179,7 +179,7 @@ protected:
*
* Function object that frees an MPI group and deletes the
* memory associated with it. Intended to be used as a deleter with
* shared_ptr.
* std::shared_ptr.
*/
struct group_free
{
@@ -199,7 +199,7 @@ protected:
* @c group class. When there are no more such instances, the group
* will be automatically freed.
*/
shared_ptr<MPI_Group> group_ptr;
std::shared_ptr<MPI_Group> group_ptr;
};
/**
@@ -223,7 +223,7 @@ BOOST_MPI_DECL bool operator==(const group& g1, const group& g2);
* processes in the same order.
*/
inline bool operator!=(const group& g1, const group& g2)
{
{
return !(g1 == g2);
}
@@ -260,7 +260,7 @@ BOOST_MPI_DECL group operator-(const group& g1, const group& g2);
* Implementation details *
************************************************************************/
template<typename InputIterator, typename OutputIterator>
OutputIterator
OutputIterator
group::translate_ranks(InputIterator first, InputIterator last,
const group& to_group, OutputIterator out)
{
@@ -283,11 +283,11 @@ group::translate_ranks(InputIterator first, InputIterator last,
/**
* INTERNAL ONLY
*
*
* Specialization of translate_ranks that handles the one case where
* we can avoid any memory allocation or copying.
*/
template<>
template<>
BOOST_MPI_DECL int*
group::translate_ranks(int* first, int* last, const group& to_group, int* out);
@@ -306,7 +306,7 @@ group group::include(InputIterator first, InputIterator last)
/**
* INTERNAL ONLY
*
*
* Specialization of group::include that handles the one case where we
* can avoid any memory allocation or copying before creating the
* group.
@@ -328,7 +328,7 @@ group group::exclude(InputIterator first, InputIterator last)
/**
* INTERNAL ONLY
*
*
* Specialization of group::exclude that handles the one case where we
* can avoid any memory allocation or copying before creating the
* group.

View File

@@ -24,7 +24,7 @@ namespace boost { namespace mpi {
*
* Forward declaration of the MPI "group" representation, for use in
* the description of the @c intercommunicator class.
*/
*/
class group;
/**
@@ -45,8 +45,8 @@ class group;
* intercommunicators occurs between the processes in the local group
* and the processes in the remote group; communication within a group
* must use a different (intra-)communicator.
*
*/
*
*/
class BOOST_MPI_DECL intercommunicator : public communicator
{
private:
@@ -59,7 +59,7 @@ private:
* underlying MPI_Comm. This operation is used for "casting" from a
* communicator to an intercommunicator.
*/
explicit intercommunicator(const shared_ptr<MPI_Comm>& cp)
explicit intercommunicator(const std::shared_ptr<MPI_Comm>& cp)
{
this->comm_ptr = cp;
}
@@ -149,7 +149,7 @@ public:
* Merge the local and remote groups in this intercommunicator into
* a new intracommunicator containing the union of the processes in
* both groups. This method is equivalent to @c MPI_Intercomm_merge.
*
*
* @param high Whether the processes in this group should have the
* higher rank numbers than the processes in the other group. Each
* of the processes within a particular group shall have the same

View File

@@ -14,7 +14,7 @@
#include <boost/mpi/config.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <memory>
#include <boost/mpi/packed_iarchive.hpp>
namespace boost { namespace mpi {
@@ -29,7 +29,7 @@ class communicator;
* receive and will be returned from @c isend or @c irecv,
* respectively.
*/
class BOOST_MPI_DECL request
class BOOST_MPI_DECL request
{
public:
/**
@@ -62,7 +62,7 @@ class BOOST_MPI_DECL request
private:
enum request_action { ra_wait, ra_test, ra_cancel };
typedef optional<status> (*handler_type)(request* self,
typedef optional<status> (*handler_type)(request* self,
request_action action);
/**
@@ -71,7 +71,7 @@ class BOOST_MPI_DECL request
* Handles the non-blocking receive of a serialized value.
*/
template<typename T>
static optional<status>
static optional<status>
handle_serialized_irecv(request* self, request_action action);
/**
@@ -80,7 +80,7 @@ class BOOST_MPI_DECL request
* Handles the non-blocking receive of an array of serialized values.
*/
template<typename T>
static optional<status>
static optional<status>
handle_serialized_array_irecv(request* self, request_action action);
public: // template friends are not portable
@@ -92,7 +92,7 @@ class BOOST_MPI_DECL request
handler_type m_handler;
/// INTERNAL ONLY
shared_ptr<void> m_data;
std::shared_ptr<void> m_data;
friend class communicator;
};

View File

@@ -31,7 +31,7 @@
#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
#include <boost/mpi/detail/ignore_iprimitive.hpp>
#include <boost/mpi/detail/ignore_oprimitive.hpp>
#include <boost/shared_ptr.hpp>
#include <memory>
#include <boost/archive/detail/register_archive.hpp>
namespace boost { namespace mpi {
@@ -197,7 +197,7 @@ public:
}
private:
boost::shared_ptr<detail::mpi_datatype_holder> holder;
std::shared_ptr<detail::mpi_datatype_holder> holder;
};
/** @brief Returns the content of an object, suitable for transmission