Compare commits
2 Commits
e0ba85db91
...
73c9e580e4
| Author | SHA1 | Date | |
|---|---|---|---|
| 73c9e580e4 | |||
| 78bdb1ddb7 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
||||
data
|
||||
build
|
||||
out.txt
|
||||
result.csv
|
||||
2
Makefile
2
Makefile
@@ -2,7 +2,7 @@ CXX = mpic++
|
||||
CXXFLAGS = -std=c++17 -O2 -Wall -Wextra -Wno-cast-function-type
|
||||
|
||||
NVCC = nvcc
|
||||
NVCCFLAGS = -O2 -Xcompiler -fPIC
|
||||
NVCCFLAGS = -O3 -std=c++17 -arch=sm_86 -Xcompiler -fPIC
|
||||
|
||||
SRC_DIR = src
|
||||
BUILD_DIR = build
|
||||
|
||||
87
src/aggregation.cpp
Normal file
87
src/aggregation.cpp
Normal file
@@ -0,0 +1,87 @@
|
||||
#include "aggregation.hpp"
|
||||
#include <algorithm>
|
||||
#include <limits>
|
||||
#include <cmath>
|
||||
|
||||
std::vector<DayStats> aggregate_days(const std::vector<Record>& records) {
|
||||
// Группируем записи по дням
|
||||
std::map<DayIndex, std::vector<const Record*>> day_records;
|
||||
|
||||
for (const auto& r : records) {
|
||||
DayIndex day = static_cast<DayIndex>(r.timestamp) / 86400;
|
||||
day_records[day].push_back(&r);
|
||||
}
|
||||
|
||||
std::vector<DayStats> result;
|
||||
result.reserve(day_records.size());
|
||||
|
||||
for (auto& [day, recs] : day_records) {
|
||||
// Сортируем по timestamp для определения first/last
|
||||
std::sort(recs.begin(), recs.end(),
|
||||
[](const Record* a, const Record* b) {
|
||||
return a->timestamp < b->timestamp;
|
||||
});
|
||||
|
||||
DayStats stats;
|
||||
stats.day = day;
|
||||
stats.low = std::numeric_limits<double>::max();
|
||||
stats.high = std::numeric_limits<double>::lowest();
|
||||
stats.open = recs.front()->open;
|
||||
stats.close = recs.back()->close;
|
||||
stats.first_ts = recs.front()->timestamp;
|
||||
stats.last_ts = recs.back()->timestamp;
|
||||
|
||||
for (const auto* r : recs) {
|
||||
stats.low = std::min(stats.low, r->low);
|
||||
stats.high = std::max(stats.high, r->high);
|
||||
}
|
||||
|
||||
stats.avg = (stats.low + stats.high) / 2.0;
|
||||
|
||||
result.push_back(stats);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::vector<DayStats> merge_day_stats(const std::vector<DayStats>& all_stats) {
|
||||
// Объединяем статистику по одинаковым дням (если такие есть)
|
||||
std::map<DayIndex, DayStats> merged;
|
||||
|
||||
for (const auto& s : all_stats) {
|
||||
auto it = merged.find(s.day);
|
||||
if (it == merged.end()) {
|
||||
merged[s.day] = s;
|
||||
} else {
|
||||
// Объединяем данные за один день
|
||||
auto& m = it->second;
|
||||
m.low = std::min(m.low, s.low);
|
||||
m.high = std::max(m.high, s.high);
|
||||
|
||||
// open берём от записи с меньшим timestamp
|
||||
if (s.first_ts < m.first_ts) {
|
||||
m.open = s.open;
|
||||
m.first_ts = s.first_ts;
|
||||
}
|
||||
|
||||
// close берём от записи с большим timestamp
|
||||
if (s.last_ts > m.last_ts) {
|
||||
m.close = s.close;
|
||||
m.last_ts = s.last_ts;
|
||||
}
|
||||
|
||||
m.avg = (m.low + m.high) / 2.0;
|
||||
}
|
||||
}
|
||||
|
||||
// Преобразуем в отсортированный вектор
|
||||
std::vector<DayStats> result;
|
||||
result.reserve(merged.size());
|
||||
|
||||
for (auto& [day, stats] : merged) {
|
||||
result.push_back(stats);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
14
src/aggregation.hpp
Normal file
14
src/aggregation.hpp
Normal file
@@ -0,0 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "record.hpp"
|
||||
#include "day_stats.hpp"
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
// Агрегация записей по дням на одном узле
|
||||
std::vector<DayStats> aggregate_days(const std::vector<Record>& records);
|
||||
|
||||
// Объединение агрегированных данных с разных узлов
|
||||
// (на случай если один день попал на разные узлы - но в нашей схеме это не должно случиться)
|
||||
std::vector<DayStats> merge_day_stats(const std::vector<DayStats>& all_stats);
|
||||
|
||||
28
src/day_stats.hpp
Normal file
28
src/day_stats.hpp
Normal file
@@ -0,0 +1,28 @@
|
||||
#pragma once
#include <cstdint>

// Index of a UTC calendar day: floor(unix_timestamp / 86400).
using DayIndex = long long;

// Aggregated OHLC statistics for a single day.
//
// NOTE(review): instances are shipped between MPI ranks as raw bytes and are
// expected to stay layout-compatible with GpuDayStats in the GPU plugin —
// do not reorder or change the fields.
struct DayStats {
    DayIndex day;     // day index (timestamp / 86400)
    double low;       // minimum Low of the day
    double high;      // maximum High of the day
    double open;      // Open of the earliest record
    double close;     // Close of the latest record
    double avg;       // (low + high) / 2
    double first_ts;  // timestamp of the earliest record (defines open)
    double last_ts;   // timestamp of the latest record (defines close)
};
|
||||
|
||||
// Интервал с изменением >= 10%
|
||||
struct Interval {
|
||||
DayIndex start_day;
|
||||
DayIndex end_day;
|
||||
double min_open;
|
||||
double max_close;
|
||||
double start_avg;
|
||||
double end_avg;
|
||||
double change;
|
||||
};
|
||||
|
||||
@@ -1,12 +1,116 @@
|
||||
#include "gpu_loader.hpp"
|
||||
#include <dlfcn.h>
|
||||
#include <map>
|
||||
#include <algorithm>
|
||||
|
||||
// Open the GPU plugin shared library exactly once and cache the handle
// (function-local static => loaded lazily on first use).
// Returns nullptr when the library is missing (CPU-only deployment).
// The handle is intentionally never dlclose()d for the process lifetime.
static void* get_gpu_lib_handle() {
    static void* handle = dlopen("./libgpu_compute.so", RTLD_NOW | RTLD_LOCAL);
    return handle;
}
|
||||
|
||||
// Resolve the plugin's gpu_is_available() entry point.
// Returns nullptr when the plugin library or the symbol is unavailable.
gpu_is_available_fn load_gpu_is_available() {
    // The rendered diff kept both the old `dlopen(...)` line and the new
    // cached-handle line, i.e. two conflicting declarations of `h`;
    // the intended code uses the shared cached handle only.
    void* h = get_gpu_lib_handle();
    if (!h) return nullptr;

    auto fn = (gpu_is_available_fn)dlsym(h, "gpu_is_available");
    if (!fn) return nullptr;

    return fn;
}
|
||||
|
||||
// Resolve the plugin's gpu_aggregate_days() entry point.
// Returns nullptr when the plugin library or the symbol is unavailable.
gpu_aggregate_days_fn load_gpu_aggregate_days() {
    if (void* h = get_gpu_lib_handle()) {
        return (gpu_aggregate_days_fn)dlsym(h, "gpu_aggregate_days");
    }
    return nullptr;
}
|
||||
|
||||
bool aggregate_days_gpu(
|
||||
const std::vector<Record>& records,
|
||||
std::vector<DayStats>& out_stats,
|
||||
gpu_aggregate_days_fn gpu_fn)
|
||||
{
|
||||
if (!gpu_fn || records.empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Группируем записи по дням и подготавливаем данные для GPU
|
||||
std::map<DayIndex, std::vector<size_t>> day_record_indices;
|
||||
|
||||
for (size_t i = 0; i < records.size(); i++) {
|
||||
DayIndex day = static_cast<DayIndex>(records[i].timestamp) / 86400;
|
||||
day_record_indices[day].push_back(i);
|
||||
}
|
||||
|
||||
int num_days = static_cast<int>(day_record_indices.size());
|
||||
|
||||
// Подготавливаем массивы для GPU
|
||||
std::vector<GpuRecord> gpu_records;
|
||||
std::vector<int> day_offsets;
|
||||
std::vector<int> day_counts;
|
||||
std::vector<long long> day_indices;
|
||||
|
||||
gpu_records.reserve(records.size());
|
||||
day_offsets.reserve(num_days);
|
||||
day_counts.reserve(num_days);
|
||||
day_indices.reserve(num_days);
|
||||
|
||||
int current_offset = 0;
|
||||
|
||||
for (auto& [day, indices] : day_record_indices) {
|
||||
day_indices.push_back(day);
|
||||
day_offsets.push_back(current_offset);
|
||||
day_counts.push_back(static_cast<int>(indices.size()));
|
||||
|
||||
// Добавляем записи этого дня
|
||||
for (size_t idx : indices) {
|
||||
const auto& r = records[idx];
|
||||
GpuRecord gr;
|
||||
gr.timestamp = r.timestamp;
|
||||
gr.open = r.open;
|
||||
gr.high = r.high;
|
||||
gr.low = r.low;
|
||||
gr.close = r.close;
|
||||
gr.volume = r.volume;
|
||||
gpu_records.push_back(gr);
|
||||
}
|
||||
|
||||
current_offset += static_cast<int>(indices.size());
|
||||
}
|
||||
|
||||
// Выделяем память для результата
|
||||
std::vector<GpuDayStats> gpu_stats(num_days);
|
||||
|
||||
// Вызываем GPU функцию
|
||||
int result = gpu_fn(
|
||||
gpu_records.data(),
|
||||
static_cast<int>(gpu_records.size()),
|
||||
day_offsets.data(),
|
||||
day_counts.data(),
|
||||
day_indices.data(),
|
||||
num_days,
|
||||
gpu_stats.data()
|
||||
);
|
||||
|
||||
if (result != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Конвертируем результат в DayStats
|
||||
out_stats.clear();
|
||||
out_stats.reserve(num_days);
|
||||
|
||||
for (const auto& gs : gpu_stats) {
|
||||
DayStats ds;
|
||||
ds.day = gs.day;
|
||||
ds.low = gs.low;
|
||||
ds.high = gs.high;
|
||||
ds.open = gs.open;
|
||||
ds.close = gs.close;
|
||||
ds.avg = gs.avg;
|
||||
ds.first_ts = gs.first_ts;
|
||||
ds.last_ts = gs.last_ts;
|
||||
out_stats.push_back(ds);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,49 @@
|
||||
#pragma once
#include "day_stats.hpp"
#include "record.hpp"
#include <vector>

// Function-pointer types resolved from the GPU plugin via dlsym().
using gpu_is_available_fn = int (*)();

// Plain-data mirror structs. Their layout must match the definitions in
// gpu_plugin.cu byte for byte — both sides exchange raw arrays of these.
struct GpuRecord {
    double timestamp;
    double open;
    double high;
    double low;
    double close;
    double volume;
};

struct GpuDayStats {
    long long day;
    double low;
    double high;
    double open;
    double close;
    double avg;
    double first_ts;
    double last_ts;
};

using gpu_aggregate_days_fn = int (*)(
    const GpuRecord* h_records,
    int num_records,
    const int* h_day_offsets,
    const int* h_day_counts,
    const long long* h_day_indices,
    int num_days,
    GpuDayStats* h_out_stats
);

// Resolve the plugin entry points (each returns nullptr on failure).
gpu_is_available_fn load_gpu_is_available();
gpu_aggregate_days_fn load_gpu_aggregate_days();

// GPU-backed aggregation wrapper (returns true on success).
bool aggregate_days_gpu(
    const std::vector<Record>& records,
    std::vector<DayStats>& out_stats,
    gpu_aggregate_days_fn gpu_fn
);
|
||||
|
||||
@@ -1,4 +1,27 @@
|
||||
#include <cuda_runtime.h>
|
||||
#include <cstdint>
|
||||
#include <cfloat>
|
||||
|
||||
// Plain-data structs shared with the host side — the layout must match
// GpuRecord / GpuDayStats in gpu_loader.hpp byte for byte.
struct GpuRecord {
    double timestamp;
    double open;
    double high;
    double low;
    double close;
    double volume;
};

struct GpuDayStats {
    long long day;
    double low;
    double high;
    double open;
    double close;
    double avg;
    double first_ts;
    double last_ts;
};
|
||||
|
||||
extern "C" int gpu_is_available() {
|
||||
int n = 0;
|
||||
@@ -6,3 +29,139 @@ extern "C" int gpu_is_available() {
|
||||
if (err != cudaSuccess) return 0;
|
||||
return (n > 0) ? 1 : 0;
|
||||
}
|
||||
|
||||
// Aggregation kernel: one (logical) thread per day.
//
// Uses a grid-stride loop over the days, so it is correct for any launch
// geometry — including the current single-thread <<<1, 1>>> launch, where it
// degenerates to the original sequential sweep over all days.
__global__ void aggregate_kernel(
    const GpuRecord* records,
    int num_records,
    const int* day_offsets,        // start of each day's slice in `records`
    const int* day_counts,         // number of records in each day's slice
    const long long* day_indices,  // day index for each slice
    int num_days,
    GpuDayStats* out_stats)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;

    for (int d = tid; d < num_days; d += stride) {
        const int offset = day_offsets[d];
        const int count = day_counts[d];

        GpuDayStats stats;
        stats.day = day_indices[d];
        stats.low = DBL_MAX;
        stats.high = -DBL_MAX;
        stats.first_ts = DBL_MAX;
        stats.last_ts = -DBL_MAX;
        stats.open = 0;
        stats.close = 0;

        for (int i = 0; i < count; i++) {
            const GpuRecord& r = records[offset + i];

            // min/max over the day
            if (r.low < stats.low) stats.low = r.low;
            if (r.high > stats.high) stats.high = r.high;

            // open/close follow the earliest/latest timestamp
            if (r.timestamp < stats.first_ts) {
                stats.first_ts = r.timestamp;
                stats.open = r.open;
            }
            if (r.timestamp > stats.last_ts) {
                stats.last_ts = r.timestamp;
                stats.close = r.close;
            }
        }

        stats.avg = (stats.low + stats.high) / 2.0;
        out_stats[d] = stats;
    }
}
|
||||
|
||||
// Host-side aggregation entry point, called from C++ via dlsym.
//
// Copies the prepared arrays to the device, runs the aggregation kernel and
// copies the per-day results back into h_out_stats.
// Returns 0 on success, a negative code on any CUDA error.
//
// Fixes over the previous version:
//  - device buffers are now freed on EVERY error path (the H2D cudaMemcpy
//    failures at -10..-13 used to leak all allocations);
//  - the final device-to-host cudaMemcpy is checked (-8 on failure).
extern "C" int gpu_aggregate_days(
    const GpuRecord* h_records,
    int num_records,
    const int* h_day_offsets,
    const int* h_day_counts,
    const long long* h_day_indices,
    int num_days,
    GpuDayStats* h_out_stats)
{
    GpuRecord* d_records = nullptr;
    int* d_day_offsets = nullptr;
    int* d_day_counts = nullptr;
    long long* d_day_indices = nullptr;
    GpuDayStats* d_out_stats = nullptr;

    // Single cleanup path; cudaFree(nullptr) is a documented no-op.
    auto free_all = [&]() {
        cudaFree(d_records);
        cudaFree(d_day_offsets);
        cudaFree(d_day_counts);
        cudaFree(d_day_indices);
        cudaFree(d_out_stats);
    };

    cudaError_t err;

    // Device allocations (error codes kept from the previous version).
    err = cudaMalloc(&d_records, num_records * sizeof(GpuRecord));
    if (err != cudaSuccess) { free_all(); return -1; }

    err = cudaMalloc(&d_day_offsets, num_days * sizeof(int));
    if (err != cudaSuccess) { free_all(); return -2; }

    err = cudaMalloc(&d_day_counts, num_days * sizeof(int));
    if (err != cudaSuccess) { free_all(); return -3; }

    err = cudaMalloc(&d_day_indices, num_days * sizeof(long long));
    if (err != cudaSuccess) { free_all(); return -4; }

    err = cudaMalloc(&d_out_stats, num_days * sizeof(GpuDayStats));
    if (err != cudaSuccess) { free_all(); return -5; }

    // Host-to-device copies.
    err = cudaMemcpy(d_records, h_records, num_records * sizeof(GpuRecord), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { free_all(); return -10; }

    err = cudaMemcpy(d_day_offsets, h_day_offsets, num_days * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { free_all(); return -11; }

    err = cudaMemcpy(d_day_counts, h_day_counts, num_days * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { free_all(); return -12; }

    err = cudaMemcpy(d_day_indices, h_day_indices, num_days * sizeof(long long), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { free_all(); return -13; }

    // Launch the kernel (1 block, 1 thread — matches the current kernel's
    // expectations; the kernel itself tolerates any geometry).
    aggregate_kernel<<<1, 1>>>(
        d_records, num_records,
        d_day_offsets, d_day_counts, d_day_indices,
        num_days, d_out_stats
    );

    // Launch error?
    err = cudaGetLastError();
    if (err != cudaSuccess) { free_all(); return -7; }

    // Wait for completion.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) { free_all(); return -6; }

    // Copy the results back (previously unchecked).
    err = cudaMemcpy(h_out_stats, d_out_stats, num_days * sizeof(GpuDayStats), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { free_all(); return -8; }

    free_all();
    return 0;
}
|
||||
|
||||
83
src/intervals.cpp
Normal file
83
src/intervals.cpp
Normal file
@@ -0,0 +1,83 @@
|
||||
#include "intervals.hpp"
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <ctime>
|
||||
|
||||
std::vector<Interval> find_intervals(const std::vector<DayStats>& days, double threshold) {
|
||||
if (days.empty()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
std::vector<Interval> intervals;
|
||||
|
||||
size_t start_idx = 0;
|
||||
double price_base = days[start_idx].avg;
|
||||
|
||||
for (size_t i = 1; i < days.size(); i++) {
|
||||
double price_now = days[i].avg;
|
||||
double change = std::abs(price_now - price_base) / price_base;
|
||||
|
||||
if (change >= threshold) {
|
||||
Interval interval;
|
||||
interval.start_day = days[start_idx].day;
|
||||
interval.end_day = days[i].day;
|
||||
interval.start_avg = price_base;
|
||||
interval.end_avg = price_now;
|
||||
interval.change = change;
|
||||
|
||||
// Находим min(Open) и max(Close) в интервале
|
||||
interval.min_open = days[start_idx].open;
|
||||
interval.max_close = days[start_idx].close;
|
||||
|
||||
for (size_t j = start_idx; j <= i; j++) {
|
||||
interval.min_open = std::min(interval.min_open, days[j].open);
|
||||
interval.max_close = std::max(interval.max_close, days[j].close);
|
||||
}
|
||||
|
||||
intervals.push_back(interval);
|
||||
|
||||
// Начинаем новый интервал
|
||||
start_idx = i + 1;
|
||||
if (start_idx >= days.size()) {
|
||||
break;
|
||||
}
|
||||
price_base = days[start_idx].avg;
|
||||
}
|
||||
}
|
||||
|
||||
return intervals;
|
||||
}
|
||||
|
||||
// Convert a day index (unix_timestamp / 86400) to a "YYYY-MM-DD" UTC date.
std::string day_index_to_date(DayIndex day) {
    const time_t ts = static_cast<time_t>(day) * 86400;

    // gmtime() returns a shared static buffer, which is unsafe if dates are
    // ever formatted concurrently and its result could be nullptr for
    // unrepresentable timestamps. Use the reentrant gmtime_r with a local
    // buffer and check the result (this is a POSIX/Linux codebase: the
    // project already relies on dlfcn/MPI).
    struct tm tm_buf;
    if (gmtime_r(&ts, &tm_buf) == nullptr) {
        return "invalid-date";
    }

    std::ostringstream oss;
    oss << std::setfill('0')
        << (tm_buf.tm_year + 1900) << "-"
        << std::setw(2) << (tm_buf.tm_mon + 1) << "-"
        << std::setw(2) << tm_buf.tm_mday;

    return oss.str();
}
|
||||
|
||||
void write_intervals(const std::string& filename, const std::vector<Interval>& intervals) {
|
||||
std::ofstream out(filename);
|
||||
|
||||
out << std::fixed << std::setprecision(2);
|
||||
out << "start_date,end_date,min_open,max_close,start_avg,end_avg,change\n";
|
||||
|
||||
for (const auto& iv : intervals) {
|
||||
out << day_index_to_date(iv.start_day) << ","
|
||||
<< day_index_to_date(iv.end_day) << ","
|
||||
<< iv.min_open << ","
|
||||
<< iv.max_close << ","
|
||||
<< iv.start_avg << ","
|
||||
<< iv.end_avg << ","
|
||||
<< std::setprecision(6) << iv.change << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
15
src/intervals.hpp
Normal file
15
src/intervals.hpp
Normal file
@@ -0,0 +1,15 @@
|
||||
#pragma once
|
||||
|
||||
#include "day_stats.hpp"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
// Вычисление интервалов с изменением >= threshold (по умолчанию 10%)
|
||||
std::vector<Interval> find_intervals(const std::vector<DayStats>& days, double threshold = 0.10);
|
||||
|
||||
// Вывод интервалов в файл
|
||||
void write_intervals(const std::string& filename, const std::vector<Interval>& intervals);
|
||||
|
||||
// Преобразование DayIndex в строку даты (YYYY-MM-DD)
|
||||
std::string day_index_to_date(DayIndex day);
|
||||
|
||||
98
src/main.cpp
98
src/main.cpp
@@ -6,12 +6,15 @@
|
||||
#include "csv_loader.hpp"
|
||||
#include "utils.hpp"
|
||||
#include "record.hpp"
|
||||
#include "day_stats.hpp"
|
||||
#include "aggregation.hpp"
|
||||
#include "intervals.hpp"
|
||||
#include "gpu_loader.hpp"
|
||||
|
||||
// Функция: отобрать записи для конкретного ранга
|
||||
std::vector<Record> select_records_for_rank(
|
||||
const std::map<long long, std::vector<Record>>& days,
|
||||
const std::vector<long long>& day_list)
|
||||
const std::map<DayIndex, std::vector<Record>>& days,
|
||||
const std::vector<DayIndex>& day_list)
|
||||
{
|
||||
std::vector<Record> out;
|
||||
for (auto d : day_list) {
|
||||
@@ -31,6 +34,18 @@ int main(int argc, char** argv) {
|
||||
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
|
||||
MPI_Comm_size(MPI_COMM_WORLD, &size);
|
||||
|
||||
// ====== ЗАГРУЗКА GPU ФУНКЦИЙ ======
|
||||
auto gpu_is_available = load_gpu_is_available();
|
||||
auto gpu_aggregate = load_gpu_aggregate_days();
|
||||
|
||||
bool have_gpu = false;
|
||||
if (gpu_is_available && gpu_is_available()) {
|
||||
have_gpu = true;
|
||||
std::cout << "Rank " << rank << ": GPU available" << std::endl;
|
||||
} else {
|
||||
std::cout << "Rank " << rank << ": GPU not available, using CPU" << std::endl;
|
||||
}
|
||||
|
||||
std::vector<Record> local_records;
|
||||
|
||||
if (rank == 0) {
|
||||
@@ -52,7 +67,7 @@ int main(int argc, char** argv) {
|
||||
continue;
|
||||
}
|
||||
|
||||
int count = vec.size();
|
||||
int count = static_cast<int>(vec.size());
|
||||
MPI_Send(&count, 1, MPI_INT, r, 0, MPI_COMM_WORLD);
|
||||
MPI_Send(vec.data(), count * sizeof(Record), MPI_BYTE, r, 1, MPI_COMM_WORLD);
|
||||
}
|
||||
@@ -72,15 +87,80 @@ int main(int argc, char** argv) {
|
||||
std::cout << "Rank " << rank << " received "
|
||||
<< local_records.size() << " records" << std::endl;
|
||||
|
||||
// ====== АГРЕГАЦИЯ НА КАЖДОМ УЗЛЕ (GPU или CPU) ======
|
||||
std::vector<DayStats> local_stats;
|
||||
|
||||
auto gpu_is_available = load_gpu_is_available();
|
||||
int have_gpu = 0;
|
||||
if (gpu_is_available) {
|
||||
std::cout << "Rank " << rank << " dll loaded" << std::endl;
|
||||
have_gpu = gpu_is_available();
|
||||
if (have_gpu && gpu_aggregate) {
|
||||
bool gpu_success = aggregate_days_gpu(local_records, local_stats, gpu_aggregate);
|
||||
if (gpu_success) {
|
||||
std::cout << "Rank " << rank << " aggregated "
|
||||
<< local_stats.size() << " days (GPU)" << std::endl;
|
||||
} else {
|
||||
// Fallback на CPU при ошибке GPU
|
||||
std::cout << "Rank " << rank << ": GPU aggregation failed, falling back to CPU" << std::endl;
|
||||
local_stats = aggregate_days(local_records);
|
||||
std::cout << "Rank " << rank << " aggregated "
|
||||
<< local_stats.size() << " days (CPU)" << std::endl;
|
||||
}
|
||||
} else {
|
||||
local_stats = aggregate_days(local_records);
|
||||
std::cout << "Rank " << rank << " aggregated "
|
||||
<< local_stats.size() << " days (CPU)" << std::endl;
|
||||
}
|
||||
|
||||
std::cout << "Rank " << rank << ": gpu_available=" << have_gpu << "\n";
|
||||
// ====== СБОР АГРЕГИРОВАННЫХ ДАННЫХ НА RANK 0 ======
|
||||
std::vector<DayStats> all_stats;
|
||||
|
||||
if (rank == 0) {
|
||||
// Добавляем свои данные
|
||||
all_stats.insert(all_stats.end(), local_stats.begin(), local_stats.end());
|
||||
|
||||
// Получаем данные от других узлов
|
||||
for (int r = 1; r < size; r++) {
|
||||
int count = 0;
|
||||
MPI_Recv(&count, 1, MPI_INT, r, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
|
||||
|
||||
std::vector<DayStats> remote_stats(count);
|
||||
MPI_Recv(remote_stats.data(), count * sizeof(DayStats),
|
||||
MPI_BYTE, r, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
|
||||
|
||||
all_stats.insert(all_stats.end(), remote_stats.begin(), remote_stats.end());
|
||||
}
|
||||
} else {
|
||||
// Отправляем свои агрегированные данные на rank 0
|
||||
int count = static_cast<int>(local_stats.size());
|
||||
MPI_Send(&count, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
|
||||
MPI_Send(local_stats.data(), count * sizeof(DayStats), MPI_BYTE, 0, 3, MPI_COMM_WORLD);
|
||||
}
|
||||
|
||||
// ====== ВЫЧИСЛЕНИЕ ИНТЕРВАЛОВ НА RANK 0 ======
|
||||
if (rank == 0) {
|
||||
std::cout << "Rank 0: merging " << all_stats.size() << " day stats..." << std::endl;
|
||||
|
||||
// Объединяем и сортируем
|
||||
auto merged_stats = merge_day_stats(all_stats);
|
||||
std::cout << "Rank 0: total " << merged_stats.size() << " unique days" << std::endl;
|
||||
|
||||
// Вычисляем интервалы
|
||||
auto intervals = find_intervals(merged_stats, 0.10);
|
||||
std::cout << "Found " << intervals.size() << " intervals with >=10% change" << std::endl;
|
||||
|
||||
// Записываем результат
|
||||
write_intervals("../result.csv", intervals);
|
||||
std::cout << "Results written to result.csv" << std::endl;
|
||||
|
||||
// Выводим первые несколько интервалов
|
||||
std::cout << "\nFirst 5 intervals:\n";
|
||||
std::cout << "start_date,end_date,min_open,max_close,change\n";
|
||||
for (size_t i = 0; i < std::min(intervals.size(), size_t(5)); i++) {
|
||||
const auto& iv = intervals[i];
|
||||
std::cout << day_index_to_date(iv.start_day) << ","
|
||||
<< day_index_to_date(iv.end_day) << ","
|
||||
<< iv.min_open << ","
|
||||
<< iv.max_close << ","
|
||||
<< iv.change << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
MPI_Finalize();
|
||||
return 0;
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include "record.hpp"
|
||||
#include "day_stats.hpp"
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
||||
using DayIndex = long long;
|
||||
|
||||
std::map<DayIndex, std::vector<Record>> group_by_day(const std::vector<Record>& recs);
|
||||
std::vector<std::vector<DayIndex>> split_days(const std::map<DayIndex, std::vector<Record>>& days, int parts);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user