forked from ddrilling/AsbCloudServer
First try
This commit is contained in:
parent
c891c78782
commit
ea1eb20f82
@ -16,7 +16,7 @@ namespace AsbCloudInfrastructure.Services.SAUB
|
||||
where TDto : AsbCloudApp.Data.ITelemetryData
|
||||
where TModel : class, AsbCloudDb.Model.ITelemetryData
|
||||
{
|
||||
private readonly IAsbCloudDbContext db;
|
||||
protected readonly IAsbCloudDbContext db;
|
||||
private readonly ITelemetryService telemetryService;
|
||||
protected readonly CacheTable<Telemetry> cacheTelemetry;
|
||||
protected readonly CacheTable<TelemetryUser> cacheTelemetryUsers;
|
||||
@ -170,5 +170,16 @@ namespace AsbCloudInfrastructure.Services.SAUB
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
/// <summary>
/// Thinning (downsampling) of telemetry data.
/// Every ratio-th record is kept — rows where (row_number % ratio) = 0 survive; the rest are deleted.
/// Because thinning can be re-run on already-thinned data, the maximum time gap
/// between surviving records can be capped via maxDateGapSec.
/// </summary>
/// <param name="idTelemetry">Telemetry id</param>
/// <param name="ratio">desired ratio of remaining records to the original count</param>
/// <param name="maxDateGapSec">maximum allowed time gap between kept records, in seconds</param>
/// <param name="token">cancellation token</param>
/// <returns>tuple of (oldCount, newCount) — presumably rows removed and rows kept; TODO confirm against implementations</returns>
// NOTE(review): "Redice" looks like a typo for "Reduce"; renaming would break every
// override and caller, so the name is left as-is.
public abstract Task<(int oldCount, int newCount)> RediceSamplingAsync(int idTelemetry, int ratio, int maxDateGapSec, CancellationToken token);
|
||||
}
|
||||
}
|
||||
|
@ -2,7 +2,12 @@
|
||||
using AsbCloudApp.Services;
|
||||
using AsbCloudDb.Model;
|
||||
using AsbCloudInfrastructure.Services.Cache;
|
||||
using DocumentFormat.OpenXml.Drawing.Charts;
|
||||
using Mapster;
|
||||
using Microsoft.EntityFrameworkCore;
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace AsbCloudInfrastructure.Services.SAUB
|
||||
{
|
||||
@ -34,5 +39,44 @@ namespace AsbCloudInfrastructure.Services.SAUB
|
||||
dto.DateTime = src.DateTime.ToRemoteDateTime(timezoneOffset);
|
||||
return dto;
|
||||
}
|
||||
|
||||
/// <summary>
/// Thinning (downsampling) of SAUB telemetry for one telemetry id.
/// Keeps every ratio-th row plus any row where a tracked value ("mode", mse_state,
/// id_feed_regulator) changed or the time/depth gap to the previous row exceeds the limits.
/// </summary>
/// <param name="idTelemetry">Telemetry id</param>
/// <param name="ratio">desired ratio of remaining records to original; values &lt;= 0 fall back to 5</param>
/// <param name="maxDateGapSec">maximum allowed time gap, seconds; values &lt;= 0 fall back to 9</param>
/// <param name="token">cancellation token</param>
/// <returns>(oldCount, newCount); currently always (0, 0) — see TODO below</returns>
public override async Task<(int oldCount, int newCount)> RediceSamplingAsync(int idTelemetry, int ratio, int maxDateGapSec, CancellationToken token)
{
    // Sizing constants for the (future) batched processing loop.
    const int ramLimit = 50 * 1024 * 1024;
    const int dataItemSize = 345; // average entity size in bytes, measured with a profiler
    const int itemsCountLimit = ramLimit / dataItemSize; // ~150_000 rows per batch, ~46 iterations
    const int maxWellDepthGap = 1;

    // Guard against nonsensical arguments.
    ratio = ratio > 0 ? ratio : 5;
    maxDateGapSec = maxDateGapSec > 0 ? maxDateGapSec : 9;

    var dbset = db.Set<TelemetryDataSaub>();

    // FIX: the original concatenation was missing separator spaces between fragments
    // ("*from", "ttt""where", ...), which produced syntactically invalid SQL.
    // Interpolated values are all ints, so there is no SQL-injection surface here.
    var sql =
        "select " +
        " * " +
        "from " +
        " (select " +
        "  *, " +
        "  rank() over win1 as row_num, " +
        "  lag(\"date\", 1) over win1 as lag_date, " +
        "  lag(\"mode\", 1) over win1 as lag_mode, " +
        "  lag(mse_state, 1) over win1 as lag_mse_state, " +
        "  lag(well_depth, 1) over win1 as lag_well_depth, " +
        "  lag(id_feed_regulator, 1) over win1 as lag_id_feed_regulator " +
        " from t_telemetry_data_saub " +
        $" where id_telemetry = {idTelemetry} " +
        " window win1 as (order by \"date\") " +
        " ) as ttt " +
        "where " +
        $" (row_num % {ratio}) = 0 " +
        " or \"mode\" != lag_mode " +
        $" or (\"date\" - lag_date) > interval '{maxDateGapSec} second' " +
        $" or well_depth - lag_well_depth > {maxWellDepthGap} " +
        " or mse_state != lag_mse_state " +
        " or id_feed_regulator != lag_id_feed_regulator;";

    var query = dbset.FromSqlRaw(sql);

    // TODO(review): the query is built but never enumerated — this method is still a stub
    // and always reports (0, 0). The batched select/delete/re-insert loop has to be added here.
    await Task.CompletedTask;
    return (0, 0);
}
|
||||
}
|
||||
}
|
||||
|
@ -3,6 +3,8 @@ using AsbCloudApp.Services;
|
||||
using AsbCloudDb.Model;
|
||||
using AsbCloudInfrastructure.Services.Cache;
|
||||
using Mapster;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace AsbCloudInfrastructure.Services.SAUB
|
||||
{
|
||||
@ -28,5 +30,10 @@ namespace AsbCloudInfrastructure.Services.SAUB
|
||||
dto.DateTime = src.DateTime.ToRemoteDateTime(timezoneOffset);
|
||||
return dto;
|
||||
}
|
||||
|
||||
/// <summary>
/// Downsampling is not supported for this telemetry type yet.
/// </summary>
/// <exception cref="System.NotImplementedException">Always thrown.</exception>
public override Task<(int oldCount, int newCount)> RediceSamplingAsync(int idTelemetry, int ratio, int maxDateGapSec, CancellationToken token)
    => throw new System.NotImplementedException();
|
||||
}
|
||||
}
|
||||
|
@ -5,6 +5,7 @@ using Microsoft.Extensions.Configuration;
|
||||
using Microsoft.Extensions.DependencyInjection;
|
||||
using Microsoft.Extensions.Hosting;
|
||||
using System;
|
||||
using System.Linq;
|
||||
|
||||
namespace AsbCloudInfrastructure
|
||||
{
|
||||
|
@ -4,6 +4,8 @@
|
||||
<TargetFramework>net6.0</TargetFramework>
|
||||
|
||||
<IsPackable>false</IsPackable>
|
||||
|
||||
<Nullable>enable</Nullable>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
|
@ -4,6 +4,7 @@
|
||||
<OutputType>Exe</OutputType>
|
||||
<TargetFramework>net6.0</TargetFramework>
|
||||
<StartupObject>ConsoleApp1.Program</StartupObject>
|
||||
<Nullable>enable</Nullable>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
|
@ -1,72 +1,104 @@
|
||||
using AsbCloudApp.Data;
|
||||
using AsbCloudApp.Data.DailyReport;
|
||||
using AsbCloudDb.Model;
|
||||
using AsbCloudInfrastructure.Services.DailyReport;
|
||||
using ClosedXML.Excel;
|
||||
using DocumentFormat.OpenXml.Wordprocessing;
|
||||
using Microsoft.EntityFrameworkCore;
|
||||
using System;
|
||||
using System.IO;
|
||||
|
||||
using System.Linq;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace ConsoleApp1
|
||||
{
|
||||
|
||||
class Program
{
    // Shared DbContext for this scratch console app.
    // Not thread-safe, but the app is single-threaded.
    private static AsbCloudDbContext db = ServiceFactory.Context;

    /// <summary>
    /// Thins out SAUB telemetry for one telemetry id, batch by batch:
    /// select the rows worth keeping, delete the whole processed date window,
    /// then re-insert only the kept rows.
    /// </summary>
    /// <param name="idTelemetry">telemetry id</param>
    /// <param name="ratio">desired ratio of kept records to original (every ratio-th row survives); &lt;= 0 falls back to 5</param>
    /// <param name="maxDateGapSec">maximum allowed time gap between surviving rows, seconds; &lt;= 0 falls back to 9</param>
    /// <param name="token">cancellation token, flowed to all EF Core calls</param>
    /// <returns>(oldCount, newCount) — rows deleted and rows re-inserted</returns>
    public static async Task<(int oldCount, int newCount)> RediceSamplingAsync(int idTelemetry, int ratio, int maxDateGapSec, CancellationToken token)
    {
        // Batch size chosen so one batch of entities stays under ~30 MB of RAM.
        const int ramLimit = 30 * 1024 * 1024;
        const int dataItemSize = 345; // average entity size in bytes, measured with a profiler
        const int itemsCountLimit = ramLimit / dataItemSize; // ~90_000 rows per batch
        const int maxWellDepthGap = 1;

        // Guard against nonsensical arguments.
        ratio = ratio > 0 ? ratio : 5;
        maxDateGapSec = maxDateGapSec > 0 ? maxDateGapSec : 9;

        var dbset = db.Set<TelemetryDataSaub>();

        // Rows to keep: every ratio-th row, plus any row where a tracked value changed
        // or the time/depth gap to the previous row exceeds the configured limits.
        // Interpolated values are ints (no SQL-injection surface); the date boundary
        // is passed as a real query parameter ({0}).
        var sqlSelect =
            "select " +
            " * " +
            "from " +
            " (select " +
            "  *, " +
            "  rank() over win1 as row_num, " +
            "  lag(\"date\", 1) over win1 as lag_date, " +
            "  lag(\"mode\", 1) over win1 as lag_mode, " +
            "  lag(mse_state, 1) over win1 as lag_mse_state, " +
            "  lag(well_depth, 1) over win1 as lag_well_depth, " +
            "  lag(id_feed_regulator, 1) over win1 as lag_id_feed_regulator " +
            " from t_telemetry_data_saub " +
            $" where id_telemetry = {idTelemetry} and \"date\" > {{0}}" +
            " window win1 as (order by \"date\") " +
            " ) as ttt " +
            "where " +
            $" (row_num % {ratio}) = 0 " +
            " or \"mode\" != lag_mode " +
            $" or (\"date\" - lag_date) > interval '{maxDateGapSec} second' " +
            $" or well_depth - lag_well_depth > {maxWellDepthGap} " +
            " or mse_state != lag_mse_state " +
            " or id_feed_regulator != lag_id_feed_regulator " +
            "order by \"date\" ";

        var sqlDelete =
            "delete " +
            "from t_telemetry_data_saub " +
            $"where id_telemetry = {idTelemetry} and \"date\" between {{0}} and {{1}};";

        var startDate = DateTimeOffset.MinValue;
        var deleted = 0;
        var saved = 0;
        while (true)
        {
            token.ThrowIfCancellationRequested();

            var keepQuery = dbset
                .FromSqlRaw(sqlSelect, startDate)
                .AsNoTracking();

            var data = await keepQuery
                .Take(itemsCountLimit)
                .ToArrayAsync(token);

            if (data.Length == 0)
                break;

            var lastDate = data.Last().DateTime;

            // Delete the whole processed window, then re-insert only the kept rows.
            var currentDeleted = await db.Database.ExecuteSqlRawAsync(sqlDelete, new object[] { startDate, lastDate }.AsEnumerable(), token);
            if (currentDeleted == 0)
                break;

            dbset.AddRange(data);
            await db.SaveChangesAsync(token);

            startDate = lastDate;
            deleted += currentDeleted;
            saved += data.Length;
        }

        return (deleted, saved);
    }

    // Scratch entry point: builds a fake daily report and writes it to ____.xlsx.
    // use ServiceFactory to make services
    static void Main(/*string[] args*/)
    {
        var head = new HeadDto()
        {
            AzimuthAngle = 12,
            WellName = "WellName",
            ClusterName = "clusterName",
            Customer = "customer",
            Contractor = "Contractor",
            ReportDate = DateTime.Now,
            WellDepthIntervalFinishDate = 27.5,
            WellDepthIntervalStartDate = 26.5,
            BottomholeDepth = 66.6
        };
        // NOTE(review): the original also built an unused BhaDto local that was never
        // attached to the report; it has been removed.
        var saub = new SaubDto();
        var report = new DailyReportDto()
        {
            Head = head,
            Saub = saub
        };

        var service = new DailyReportMakerExcel();
        var stream = service.MakeReportFromBlocks(report);
        var filename = "____.xlsx";
        if (File.Exists(filename))
            File.Delete(filename);
        using var fileStream = File.OpenWrite(filename);
        stream.CopyTo(fileStream);

        // NOTE(review): the original ended with `return;` followed by UNREACHABLE code —
        // commented-out scratch plus `RediceSamplingAsync(183, 5, 10, CancellationToken.None).Wait();`.
        // That dead code has been removed; if the downsampling run was meant to execute,
        // call RediceSamplingAsync here (and prefer awaiting it over .Wait()).
    }
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user