eliminate leak and fix memory tracking

parent 5ad10a1057
commit bd541bc57f
@@ -52,7 +52,7 @@ int bpreds(int *dop1, int rows, int cols, int *bin, int3 numpreds, int **ret)
 int tmplen = rows + 1;
 int size = tmplen * sizeof(int);
 reservar(&temp, size);
-// cerr << "+ " << temp << " temp bpreds " << size << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp bpreds " << size << endl;
 cudaMemset(temp, 0, size);

 #if TIMER
@@ -68,7 +68,7 @@ int bpreds(int *dop1, int rows, int cols, int *bin, int3 numpreds, int **ret)
 else
 hsize = sproj;
 reservar(&dhead, hsize);
-// cerr << "+ " << dhead << " dhead " << hsize << endl;
+// DEBUG_MEM cerr << "+ " << dhead << " dhead " << hsize << endl;
 cudaMemcpy(dhead, bin, spredn, cudaMemcpyHostToDevice);

 int blockllen = rows / 1024 + 1;
@@ -113,7 +113,7 @@ int bpreds(int *dop1, int rows, int cols, int *bin, int3 numpreds, int **ret)

 int *fres;
 reservar(&fres, num * sproj);
-// cerr << "+ " << fres << " fres " << num * sproj << endl;
+// DEBUG_MEM cerr << "+ " << fres << " fres " << num * sproj << endl;
 cudaMemcpy(dhead, bin + predn, sproj, cudaMemcpyHostToDevice);
 llenarproyectar<<<blockllen, numthreads, sproj>>>(dop1, rows, numpreds.y, temp, dhead, numpreds.z, fres);

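Throughout the diff the allocation and free traces stay commented out; the commit only retags them with a DEBUG_MEM marker so every memory-tracking trace can be found (and later toggled) in one consistent way. The marker itself is not defined in this commit, so the following is only a sketch of how such a compile-time switch could look; the DEBUG_MEM macro and the -DDEBUG_MEM_TRACE flag are assumptions, not part of the repository.

// Hypothetical switch for the traces tagged "DEBUG_MEM" in this commit.
// Build with -DDEBUG_MEM_TRACE=1 to emit one line per device allocation/free.
#if DEBUG_MEM_TRACE
#define DEBUG_MEM(stmt) do { stmt; } while (0)
#else
#define DEBUG_MEM(stmt) do { } while (0)
#endif

// Example use, mirroring the tagged lines in bpreds():
//   reservar(&temp, size);
//   DEBUG_MEM(cerr << "+ " << temp << " temp bpreds " << size << endl);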
@@ -859,6 +859,7 @@ extern "C"
 void Cuda_Statistics(void)
 {
 cerr << "GPU Statistics" << endl;
+#if TIMER
 cerr << "Called " << cuda_stats.calls << "times." << endl;
 cerr << "GPU time " << cuda_stats.total_time << "msec." << endl;
 cerr << "Longest call " << cuda_stats.max_time << "msec." << endl;
@@ -875,6 +876,7 @@ extern "C"
 cerr << " Selects/Projects: " << cuda_stats.selects << "." << endl;
 cerr << " Unions: " << cuda_stats.unions << "." << endl;
 cerr << " Built-ins: " << cuda_stats.builtins << "." << endl << endl;
+#endif
 }

 extern "C"
@@ -946,11 +948,13 @@ int Cuda_Eval(predicate **inpfacts, int ninpf, predicate **inprules, int ninpr,
 vector<gpunode>::iterator qposf;
 vector<rulenode>::iterator qposr;

+#if TIMER
 cudaEvent_t start, stop;
 float time;
 cudaEventCreate(&start);
 cudaEventCreate(&stop);
 cudaEventRecord(start, 0);
+#endif

 while(reglas.size()) /*Here's the main loop*/
 {
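With these guards, a build where TIMER is disabled no longer declares the CUDA events or the time variable that only the timed build uses, and Cuda_Statistics() above no longer prints counters that such a build never updates. The pattern, assembled from the guarded calls in this diff (TIMER is assumed to be a 0/1 compile-time flag, e.g. -DTIMER=1):

#if TIMER
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);   // mark the beginning of the evaluation
#endif

/* ... fixpoint loop over reglas ... */

#if TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);                // wait until the stop event is reached
cudaEventElapsedTime(&time, start, stop);  // elapsed GPU time in milliseconds
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif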
@@ -996,7 +1000,7 @@ int Cuda_Eval(predicate **inpfacts, int ninpf, predicate **inprules, int ninpr,
 {
 num_refs = rows1 * cols1 * sizeof(int);
 reservar(&res, num_refs);
-// cerr << "+ " << res << " Res " << num_refs << endl;
+// DEBUG_MEM cerr << "+ " << res << " Res " << num_refs << endl;
 cudaMemcpyAsync(res, dop1, num_refs, cudaMemcpyDeviceToDevice);
 registrar(rul_act->name, cols1, res, rows1, itr, 1);
 rul_act->gen_ant = rul_act->gen_act;
@@ -1285,7 +1289,7 @@ int Cuda_Eval(predicate **inpfacts, int ninpf, predicate **inprules, int ninpr,
 res_rows = selectproyect(dop1, rows1, cols1, tmprule.num_columns, tmprule.select[0], tmprule.numsel[0], tmprule.selfjoin[0], tmprule.numselfj[0], tmprule.project[0], &res);
 if(qposr != fin && qposr->name == qname) {
 cudaFree(dop1);
-// cerr << "- " << dop1 << " dop1" << endl;
+// DEBUG_MEM cerr << "- " << dop1 << " dop1" << endl;
 }
 }

@@ -1293,14 +1297,15 @@ int Cuda_Eval(predicate **inpfacts, int ninpf, predicate **inprules, int ninpr,
 tipo = res_rows * cols1 * sizeof(int);
 hres = (int *)malloc(tipo);
 cudaMemcpy(hres, res, tipo, cudaMemcpyDeviceToHost);
-if(res_rows > 0 && tmprule.numsel[0] != 0 && tmprule.numselfj[0] != 0) {
+if(res_rows > 0 /*&& tmprule.numsel[0] != 0 && tmprule.numselfj[0] != 0 */) {
 cudaFree(res);
-// cerr << "- " << res << " res" << endl;
+// DEBUG_MEM cerr << "- " << res << " res" << endl;
 }
 }
 else
 res_rows = 0;

+#if TIMER
 cudaEventRecord(stop, 0);
 cudaEventSynchronize(stop);
 cudaEventElapsedTime(&time, start, stop);
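This hunk is the leak the commit title refers to. The query result res has already been copied into the host buffer hres, but it was previously freed only when the rule had both selects and self-joins, so select-only, self-join-only and pure-projection queries leaked the device copy on every call. Relaxing the condition to res_rows > 0 releases it in every case. A condensed sketch of the corrected flow, using the variable names from the diff (surrounding declarations assumed):

// Copy the query result to the host, then always release the device copy.
int tipo = res_rows * cols1 * sizeof(int);            // result size in bytes
int *hres = (int *)malloc(tipo);
cudaMemcpy(hres, res, tipo, cudaMemcpyDeviceToHost);
if (res_rows > 0)      // old code also required numsel[0] != 0 && numselfj[0] != 0
    cudaFree(res);     // the skipped free was the leak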
@@ -1311,6 +1316,7 @@ int Cuda_Eval(predicate **inpfacts, int ninpf, predicate **inprules, int ninpr,
 cuda_stats.min_time = time;
 cudaEventDestroy(start);
 cudaEventDestroy(stop);
+#endif

 if(showr == 1)
 {
@@ -112,7 +112,7 @@ void limpiar(const char s[])

 if(GPUmem.size() == 0)
 {
-// cerr << s << ": not enough GPU memory: have " << avmem << endl;
+// DEBUG_MEM cerr << s << ": not enough GPU memory: have " << avmem << endl;
 exit(1);
 }

@@ -176,7 +176,7 @@ void liberar(int *ptr, int size)
 //cout << "L " << avmem << " " << size;

 cudaFree(ptr);
-// cerr << "- " << ptr << " " << size << endl;
+// DEBUG_MEM cerr << "- " << ptr << " " << size << endl;
 avmem += size;

 //cout << " " << avmem << endl;
@@ -186,7 +186,7 @@ void reservar(int **ptr, int size)
 {
 //size_t free, total;
 //cudaMemGetInfo( &free, &total );
-// // cerr << "- " << free << " " << size << endl;
+// cerr << "? " << free << " " << size << endl;

 if (size == 0) {
 *ptr = NULL;
@@ -199,8 +199,8 @@ void reservar(int **ptr, int size)
 if (! *ptr ) {
 size_t free, total;
 cudaMemGetInfo( &free, &total );
-// cerr << "Could not allocate " << size << " bytes, only " << free << " avaliable from total of " << total << " !!!" << endl;
-// cerr << "Exiting CUDA...." << endl;
+cerr << "Could not allocate " << size << " bytes, only " << free << " avaliable from total of " << total << " !!!" << endl;
+cerr << "Exiting CUDA...." << endl;
 exit(1);
 }
 avmem -= size;
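reservar() and liberar() are the bookkeeping pair behind the "fix memory tracking" half of the title: every successful allocation subtracts its size from avmem, every liberar() adds it back, and the out-of-memory report is now printed unconditionally instead of staying commented out. A minimal sketch of that discipline is below; it assumes reservar() allocates with cudaMalloc (the call sits outside this hunk) and omits whatever cleanup or retry logic the real function performs before giving up.

#include <cstdlib>
#include <iostream>
#include <cuda_runtime.h>
using namespace std;

static size_t avmem;   // bytes of GPU memory the engine believes are still free

void reservar(int **ptr, int size)   // sketch only; real version may retry/clean up
{
    if (size == 0) { *ptr = NULL; return; }
    cudaMalloc((void **)ptr, size);          // assumption: allocation happens here
    if (!*ptr) {
        size_t free, total;
        cudaMemGetInfo(&free, &total);
        cerr << "Could not allocate " << size << " bytes, only " << free
             << " available from total of " << total << " !!!" << endl;
        exit(1);
    }
    avmem -= size;                           // track the outstanding allocation
}

void liberar(int *ptr, int size)
{
    cudaFree(ptr);
    avmem += size;                           // return the bytes to the counter
}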
@@ -277,7 +277,7 @@ int cargar(int name, int num_rows, int num_columns, int is_fact, int *address_ho
 }
 size = num_rows * num_columns * sizeof(int);
 reservar(&temp, size);
-// cerr << "+ " << temp << " temp " << size << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp " << size << endl;
 cudaMemcpyAsync(temp, address_host_table, size, cudaMemcpyHostToDevice);
 registrar(name, num_columns, temp, num_rows, itr, 0);
 *ptr = temp;
@@ -297,7 +297,7 @@ int cargar(int name, int num_rows, int num_columns, int is_fact, int *address_ho
 }
 size = totalrows * num_columns * sizeof(int);
 reservar(&temp, size);
-// cerr << "+ " << temp << " temp 2 " << size << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp 2 " << size << endl;
 for(x = 1; x < numgpu; x++)
 {
 cudaMemcpyAsync(temp + temp_storage[x-1].size, temp_storage[x].dev_address, temp_storage[x].size, cudaMemcpyDeviceToDevice);
@@ -340,7 +340,7 @@ int cargafinal(int name, int cols, int **ptr)
 }

 reservar(&temp, cont * cols * sizeof(int));
-// cerr << "+ " << temp << " temp 3 " << cont * cols * sizeof(int) << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp 3 " << cont * cols * sizeof(int) << endl;
 ini = temp;

 pos = gpu;
@@ -463,7 +463,7 @@ void resultados(vector<rulenode>::iterator first, vector<rulenode>::iterator las
 cout << endl;
 }
 cudaFree(gpu->dev_address);
-// cerr << "- " << gpu->dev_address << " gpu->dev_address" << endl;
+// DEBUG_MEM cerr << "- " << gpu->dev_address << " gpu->dev_address" << endl;
 free(temp);
 gpu++;
 }
@@ -495,7 +495,7 @@ void clear_memory()
 {
 if (ini->isrule) {
 cudaFree(ini->dev_address);
-// cerr << "- " << ini->dev_address << " ini->dev_address" << endl;
+// DEBUG_MEM cerr << "- " << ini->dev_address << " ini->dev_address" << endl;
 ini = GPUmem.erase(ini);
 } else {
 ini++;
@@ -211,7 +211,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int
 #endif
 int head_bytes = mayor(numselect, numselfj, head_size) * sizeof(int);
 reservar(&dhead, head_bytes);
-// cerr << "+ " << dhead << " dhead " << head_bytes << endl;
+// DEBUG_MEM cerr << "+ " << dhead << " dhead " << head_bytes << endl;

 int blockllen = rows / 1024 + 1;
 int numthreads = 1024;
@@ -222,7 +222,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int
 tmplen = rows + 1;
 size2 = tmplen * sizeof(int);
 reservar(&temp, size2);
-// cerr << "+ " << temp << " temp select " << size2 << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp select " << size2 << endl;
 cudaMemset(temp, 0, size2);

 size = numselect * sizeof(int);
@@ -245,7 +245,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int

 size = head_size * sizeof(int);
 reservar(&fres, num * size);
-// cerr << "+ " << fres << " fres select " << num*size << endl;
+// DEBUG_MEM cerr << "+ " << fres << " fres select " << num*size << endl;
 cudaMemcpy(dhead, project, size, cudaMemcpyHostToDevice);
 llenarproyectar<<<blockllen, numthreads, size>>>(dop1, rows, cols, temp, dhead, head_size, fres);
 liberar(dhead, head_bytes);
@@ -260,7 +260,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int
 tmplen = rows + 1;
 size2 = tmplen * sizeof(int);
 reservar(&temp, size2);
-// cerr << "+ " << temp << " temp select " << size2 << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp select " << size2 << endl;
 cudaMemset(temp, 0, size2);

 size = numselfj * sizeof(int);
@@ -275,7 +275,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int

 size = head_size * sizeof(int);
 reservar(&fres, num * size);
-// cerr << "+ " << fres << " fres select again " << num*size << endl;
+// DEBUG_MEM cerr << "+ " << fres << " fres select again " << num*size << endl;
 cudaMemcpy(dhead, project, size, cudaMemcpyHostToDevice);
 llenarproyectar<<<blockllen, numthreads, size>>>(dop1, rows, cols, temp, dhead, head_size, fres);
 liberar(dhead, head_bytes);
@@ -287,7 +287,7 @@ int selectproyect(int *dop1, int rows, int cols, int head_size, int *select, int
 {
 size = head_size * sizeof(int);
 reservar(&fres, rows * size);
-// cerr << "+ " << fres << " fres select third " << rows*size << endl;
+// DEBUG_MEM cerr << "+ " << fres << " fres select third " << rows*size << endl;
 cudaMemcpy(dhead, project, size, cudaMemcpyHostToDevice);
 proyectar<<<blockllen, numthreads, size>>>(dop1, rows, cols, dhead, head_size, fres);
 liberar(dhead, head_bytes);
@@ -780,9 +780,9 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 int dconsize = sizet * 2;*/

 reservar(&dcons, sizet);
-// cerr << "+ " << dcons << " dcons tree " << sizet << endl;
+// DEBUG_MEM cerr << "+ " << dcons << " dcons tree " << sizet << endl;
 reservar(&temp, sizet2);
-// cerr << "+ " << temp << " temp tree " << sizet2 << endl;
+// DEBUG_MEM cerr << "+ " << temp << " temp tree " << sizet2 << endl;
 thrust::device_ptr<int> res = thrust::device_pointer_cast(temp);

 numthreads = 1024;
@@ -857,9 +857,9 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:

 memSizeS = newLen * sizeof(int);
 reservar(&d_S, memSizeS);
-// cerr << "+ " << d_S << " d_S " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << d_S << " d_S " << memSizeS << endl;
 reservar(&posS, memSizeS);
-// cerr << "+ " << posS << " posS " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << posS << " posS " << memSizeS << endl;
 llenar<<<blockllen, numthreads>>>(p2, d_S, sLen, of2, wherej[1], temp, posS);
 sLen = newLen;
 }
@@ -880,9 +880,9 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:

 memSizeS = newLen * sizeof(int);
 reservar(&d_S, memSizeS);
-// cerr << "+ " << d_S << " d_S m " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << d_S << " d_S m " << memSizeS << endl;
 reservar(&posS, memSizeS);
-// cerr << "+ " << posS << " posS m " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << posS << " posS m " << memSizeS << endl;
 llenar<<<blockllen, numthreads>>>(p2, d_S, sLen, of2, wherej[1], temp, posS);
 sLen = newLen;
 }
@@ -890,7 +890,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 {
 memSizeS = sLen * sizeof(int);
 reservar(&d_S, memSizeS);
-// cerr << "+ " << d_S << " d_S n " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << d_S << " d_S n " << memSizeS << endl;
 llenarnosel<<<blockllen, numthreads>>>(p2, d_S, sLen, of2, wherej[1]);
 }
 }
@@ -938,9 +938,9 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 m32rLen = newLen + extraspace;
 sizem32 = m32rLen * sizeof(int);
 reservar(&d_R, sizem32);
-// cerr << "+ " << d_R << " d_R m " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << d_R << " d_R m " << sizem32 << endl;
 reservar(&posR, sizem32);
-// cerr << "+ " << posR << " posR m " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << posR << " posR m " << sizem32 << endl;
 cudaMemsetAsync(d_R + newLen, 0x7f, sizextra);
 cudaMemsetAsync(posR + newLen, 0x7f, sizextra);
 llenar<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0], temp, posR);
@@ -966,9 +966,9 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 m32rLen = newLen + extraspace;
 sizem32 = m32rLen * sizeof(int);
 reservar(&d_R, sizem32);
-// cerr << "+ " << d_R << " d_R n " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << d_R << " d_R n " << sizem32 << endl;
 reservar(&posR, sizem32);
-// cerr << "+ " << posR << " posR n " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << posR << " posR n " << sizem32 << endl;
 cudaMemsetAsync(d_R + newLen, 0x7f, sizextra);
 cudaMemsetAsync(posR + newLen, 0x7f, sizextra);
 llenar<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0], temp, posR);
@@ -978,7 +978,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 {
 sizem32 = m32rLen * sizeof(int);
 reservar(&d_R, sizem32);
-// cerr << "+ " << d_R << " d_R sizem32 " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << d_R << " d_R sizem32 " << sizem32 << endl;
 cudaMemsetAsync(d_R + rLen, 0x7f, extraspace * sizeof(int));
 llenarnosel<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0]);
 }
@@ -989,7 +989,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 {
 sizem32 = m32rLen * sizeof(int);
 reservar(&d_R, sizem32);
-// cerr << "+ " << d_R << " d_R sz " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << d_R << " d_R sz " << sizem32 << endl;
 cudaMemsetAsync(d_R + rLen, 0x7f, extraspace * sizeof(int));
 llenarnosel<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0]);
 }
@@ -1025,7 +1025,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 if(posR == NULL)
 {
 reservar(&posR, sizem32);
-// cerr << "+ " << posR << " posR m32 " << sizem32 << endl;
+// DEBUG_MEM cerr << "+ " << posR << " posR m32 " << sizem32 << endl;
 permutation = thrust::device_pointer_cast(posR);
 thrust::sequence(permutation, permutation + m32rLen);
 }
@@ -1093,7 +1093,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:

 int *d_locations;
 reservar(&d_locations, memSizeS);
-// cerr << "+ " << d_locations << " d_locs n " << memSizeS << endl;
+// DEBUG_MEM cerr << "+ " << d_locations << " d_locs n " << memSizeS << endl;

 dim3 Dbs(THRD_PER_BLCK_search, 1, 1);
 dim3 Dgs(BLCK_PER_GRID_search, 1, 1);
@@ -1133,7 +1133,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
 resSize = sum * sizepro;
 reservar(&d_Rout, resSize);
-// cerr << "+ " << d_Rout << " d_Rout n " << resSize << endl;
+// DEBUG_MEM cerr << "+ " << d_Rout << " d_Rout n " << resSize << endl;
 if(numj > 2)
 {
 cudaMemcpy(dcons + rule->num_columns, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);
@@ -1148,7 +1148,7 @@ int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>:
 cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
 resSize = sum * sizepro;
 reservar(&d_Rout, resSize);
-// cerr << "+ " << d_Rout << " d_Rout 2 " << resSize << endl;
+// DEBUG_MEM cerr << "+ " << d_Rout << " d_Rout 2 " << resSize << endl;
 if(numj > 2)
 {
 cudaMemcpy(dcons + projp.y, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);