Commit f1199196 authored by Recolic Keghart

remove copy constructor

parent 58e6e38e
@@ -81,13 +81,14 @@ class GPMA {
        init_gpma_insertions();
    }
    GPMA() = default;
+   GPMA(const GPMA<DEV> &) = delete;
    void print_status(std::string prefix = "DBG") const {
        DEBUG_PRINTFLN(prefix + ": GPMA_DUMP: keys={}, values={}, row_offset={}, seg_length,tree_height,row_num={},{},{}", keys.size(), values.size(), row_offset.size(), segment_length, tree_height, row_num);
        DEBUG_PRINTFLN(prefix + ": GPMA_DUMP: keys={}, values={}, row_offset={}", rlib::printable_iter(keys), rlib::printable_iter(values_for_print(values)), rlib::printable_iter(row_offset));
    }
-   auto mirror() const {
+   auto &&mirror() const {
        std::conditional_t<DEV == GPU, GPMA<CPU>, GPMA<GPU>> result;
        result.keys = keys;
        result.values = values;
@@ -97,7 +98,7 @@ class GPMA {
        result.tree_height = tree_height;
        result.lower_element = lower_element;
        result.upper_element = upper_element;
-       return result;
+       return std::move(result);
    }
};
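For context, a minimal stand-alone sketch (not the real GPMA class, and not part of this commit) of what deleting the copy constructor buys: accidental by-value copies no longer compile, so callers bind the object by reference instead, which is what the main() changes further down do with const auto &gpma_mirror. The Buffer type below is purely illustrative.

// Hypothetical stand-in for GPMA<DEV>, only to illustrate the deleted copy constructor.
struct Buffer {
    Buffer() = default;
    Buffer(const Buffer &) = delete;   // same pattern as GPMA(const GPMA<DEV> &) = delete;
};

int main() {
    Buffer a;
    // Buffer b = a;            // no longer compiles: use of deleted copy constructor
    const Buffer &view = a;     // fine: reference binding, no copy is made
    (void)view;
    return 0;
}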
......
@@ -6,7 +6,7 @@
#define FULL_MASK 0xffffffff
template <SIZE_TYPE THREADS_NUM>
-__global__ void gpma_bfs_gather_kernel(SIZE_TYPE *node_queue, SIZE_TYPE *node_queue_offset, SIZE_TYPE *edge_queue, SIZE_TYPE *edge_queue_offset, KEY_TYPE *keys, VALUE_TYPE *values, SIZE_TYPE *row_offsets) {
+__global__ void gpma_bfs_gather_kernel(SIZE_TYPE *node_queue, SIZE_TYPE *node_queue_offset, SIZE_TYPE *edge_queue, SIZE_TYPE *edge_queue_offset, const KEY_TYPE *keys, const VALUE_TYPE *values, const SIZE_TYPE *row_offsets) {
    typedef cub::BlockScan<SIZE_TYPE, THREADS_NUM> BlockScan;
    __shared__ typename BlockScan::TempStorage block_temp_storage;
@@ -249,7 +249,7 @@ __global__ void gpma_bfs_contract_kernel(SIZE_TYPE *edge_queue, SIZE_TYPE *edge_
}
template <dev_type_t DEV>
-__host__ void gpma_bfs(KEY_TYPE *keys, VALUE_TYPE *values, SIZE_TYPE *row_offsets, SIZE_TYPE node_size, SIZE_TYPE edge_size, SIZE_TYPE start_node, SIZE_TYPE *results) {
+__host__ void gpma_bfs(const KEY_TYPE *keys, const VALUE_TYPE *values, const SIZE_TYPE *row_offsets, SIZE_TYPE node_size, SIZE_TYPE edge_size, SIZE_TYPE start_node, SIZE_TYPE *results) {
    anyMemset<DEV>(results, 0, sizeof(SIZE_TYPE) * node_size);
    SIZE_TYPE *bitmap;
@@ -284,6 +284,7 @@ __host__ void gpma_bfs(KEY_TYPE *keys, VALUE_TYPE *values, SIZE_TYPE *row_offset
        if (DEV == GPU) {
            gpma_bfs_gather_kernel<THREADS_NUM><<<BLOCKS_NUM, THREADS_NUM>>>(node_queue, node_queue_offset, edge_queue, edge_queue_offset, keys, values, row_offsets);
        } else {
+           throw std::runtime_error("Not implemented");
        }
        // contract
@@ -295,6 +296,7 @@ __host__ void gpma_bfs(KEY_TYPE *keys, VALUE_TYPE *values, SIZE_TYPE *row_offset
        if (DEV == GPU) {
            gpma_bfs_contract_kernel<THREADS_NUM><<<BLOCKS_NUM, THREADS_NUM>>>(edge_queue, edge_queue_offset, node_queue, node_queue_offset, level, results, bitmap);
        } else {
+           throw std::runtime_error("Not implemented");
        }
        anyMemcpy<DEV, CPU>(host_num, node_queue_offset, sizeof(SIZE_TYPE));
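The const qualifiers added above act as a read-only contract on the adjacency data: any accidental store through keys, values, or row_offsets inside the gather/contract kernels now fails at compile time instead of silently corrupting the GPMA. A minimal illustrative kernel (made-up name, not from this repository) showing the effect:

// Illustrative only; unsigned stands in for SIZE_TYPE to keep the sketch self-contained.
__global__ void read_only_demo(const unsigned *row_offsets, unsigned *results, unsigned n) {
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    // row_offsets[tid] = 0;          // would be a compile error: assignment to const data
    results[tid] = row_offsets[tid];  // reads remain allowed
}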
@@ -73,7 +73,7 @@ int main(int argc, char **argv) {
    cudaDeviceSynchronize();
    LOG_TIME("before first bfs") {
-       auto gpma_mirror = gpma
+       const auto &gpma_mirror = gpma
#if TEST_DEV == CPU
            .mirror()
#endif
@@ -108,7 +108,7 @@ int main(int argc, char **argv) {
    LOG_TIME("before second bfs")
    {
-       auto gpma_mirror = gpma
+       const auto &gpma_mirror = gpma
#if TEST_DEV == CPU
            .mirror()
#endif
New file gpma_multidev.cuh:
+#ifndef GPMA_MULTIDEV_CUH
+#define GPMA_MULTIDEV_CUH
+#include "gpma.cuh"
+#include <vector>
+struct gpma_multidev {
+    std::vector<std::pair<void *, dev_type_t>>
+}
+#endif
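The new header is only a stub: the vector member has no name and the struct and member declarations are missing their trailing semicolons, so it does not compile as committed. A hedged guess at the intended shape, assuming the vector is meant to hold type-erased per-device GPMA pointers tagged with their device type; the struct, member, and helper names below are invented for illustration and are not part of this commit:

#include "gpma.cuh"   // for GPMA<>, dev_type_t, GPU, CPU
#include <utility>
#include <vector>

struct gpma_multidev_sketch {
    // One entry per device-resident GPMA instance, stored type-erased with its device tag.
    std::vector<std::pair<void *, dev_type_t>> devices;

    void attach(GPMA<GPU> *g) { devices.emplace_back(g, GPU); }
    void attach(GPMA<CPU> *g) { devices.emplace_back(g, CPU); }
};

Storing a (void *, dev_type_t) pair means every use site has to cast back to GPMA<GPU> or GPMA<CPU> based on the tag; a std::variant of the two pointer types would make that dispatch type-safe, at the cost of a dependency on <variant>.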