const csvParse = require('csv-parse/lib/sync');
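
// Lenient float parser for sensor CSV values: returns `null` for non-strings,
// empty strings and "NaN" (any casing); otherwise it falls back to parsing
// only the first 10 characters, and finally to 0.0 rather than returning NaN.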
function _parseFloat(token) {
  if (typeof token !== 'string') {
    return null;
  }

  token = token.trim();

  if (token === '') {
    return null;
  }

  if (/^nan$/i.test(token)) {
    return null;
  }

  let f = parseFloat(token);

  if (isNaN(f)) {
    f = parseFloat(token.substring(0, 10));
  }

  if (isNaN(f)) {
    f = 0.0;
  }

  return f;
}
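
// Integer variant of the lenient parser above: parses via _parseFloat and
// floors the result, passing `null` through unchanged.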
function _parseInt(token) {
  const asFloat = _parseFloat(token);
  if (asFloat !== null) {
    return Math.floor(asFloat);
  } else {
    return asFloat;
  }
}
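
// String variant: returns `null` for non-strings and empty strings, otherwise
// the token unchanged (deliberately without trimming, see the comment below).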
function _parseString(token) {
  if (typeof token !== 'string') {
    return null;
  }

  // This time we do not trim -- because we assume that the quoting mechanism
  // from CSV might have kicked in and we actually want the spacing around the
  // token.

  if (token === '') {
    return null;
  }

  return token;
}
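
// Undoes the `$`-as-newline hack: if the body ends with `$`, every `$` is
// replaced with a real newline so the CSV parser sees separate lines.
// Illustrative example (made-up data): "Date;Time$01.01.2020;12:00:00$"
// becomes a two-line CSV body.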
function replaceDollarNewlinesHack(body) {
  // see if we are using the hack with $ as newlines, replace them for the csv parser
  if (body.endsWith('$')) {
    return body.replace(/\$/g, '\n');
  }

  return body;
}
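
// Parses a track body in either format and appends the resulting points to
// trackInfo.trackData.points. If `format` is not given, it is auto-detected;
// an undetectable format raises an error.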
function addPointsToTrack(trackInfo, body, format = null) {
  body = replaceDollarNewlinesHack(body);

  const detectedFormat = format != null ? format : detectFormat(body);

  let parser;
  switch (detectedFormat) {
    case 'invalid':
      throw new Error('track format cannot be detected');

    case 1:
      parser = parseObsver1;
      break;

    case 2:
      parser = parseObsver2;
      break;
  }

  const points = trackInfo.trackData.points;
  for (const newPoint of parser(body)) {
    points.push(newPoint);
  }
}
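
// Determines the track format from the first line of the body. A metadata
// line such as "OBSDataFormat=2&..." wins; otherwise a CSV header starting
// with "Date;Time" means version 1, or version 2 if it also contains ";Rus";
// a line starting with a DD.MM.YYYY date (headerless old firmware) also means
// version 1. Anything else is reported as 'invalid'.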
function detectFormat(body) {
  body = replaceDollarNewlinesHack(body);

  if (!body.length) {
    return 'invalid';
  }

  const firstLinebreakIndex = body.indexOf('\n');

  if (firstLinebreakIndex === -1) {
    // We need at least one linebreak in the whole file, to separate header and
    // data. If the file contains no header, it is invalid.
    return 'invalid';
  }

  const firstLine = body.substring(0, firstLinebreakIndex);

  const match = firstLine.match(/(^|&)OBSDataFormat=([\d]+)($|&)/);
  if (match) {
    return Number(match[2]);
  }

  // If we have no metadata line, but start immediately with a header, AND it
  // contains `;Rus`, it is format version 2.
  if (/^Date;Time.*;Rus/.test(firstLine)) {
    return 2;
  }

  // If we have no metadata line, but start immediately with a header, it is
  // format version 1.
  if (/^Date;Time/.test(firstLine)) {
    return 1;
  }

  // If we immediately start with data (a date, formatted as DD.MM.YYYY), then
  // we have an old OBS not sending the header. It must therefore be old
  // format, too.
  if (/^[0-9]{2}\.[0-9]{2}\.[0-9]{4};/.test(firstLine)) {
    return 1;
  }

  return 'invalid';
}
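
// Generator over the records of a version 1 CSV body. Column names are forced
// to a fixed set (see below), numeric fields are parsed leniently, the header
// line is skipped, zero coordinates are normalized to `null`, and distance
// values of 255 or 999 (the old "no measurement" markers) become `null`.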
function* parseObsver1(body) {
  for (const record of csvParse(body, {
    delimiter: ';',
    encoding: 'utf8',
    // We specify different column names here, as the order of columns was
    // always the same, but their naming was different. By enforcing these
    // column names we don't have to translate between them. Then we just
    // ignore the first line (or any line that starts with "Date;").
    // Original header usually is:
    // Date;Time;Latitude;Longitude;Course;Speed;Right;Left;Confirmed;insidePrivacyArea
    columns: ['date', 'time', 'latitude', 'longitude', 'course', 'speed', 'd1', 'd2', 'flag', 'private'],
    relax_column_count: true,
    cast(value, { column }) {
      if (['latitude', 'longitude', 'course', 'speed'].includes(column)) {
        return _parseFloat(value);
      } else if (['d1', 'd2', 'flag'].includes(column)) {
        return _parseInt(value);
      } else if (column === 'private') {
        return Boolean(_parseInt(value));
      } else {
        return _parseString(value);
      }
    },
  })) {
    if (record.date === 'Date') {
      // ignore header line
      continue;
    }

    if (!record.latitude && !record.longitude) {
      // invalid record, make sure lat/lng say `null` instead of `0`
      record.latitude = null;
      record.longitude = null;
    }

    // in old format, 255 or 999 means "no measurement"
    if (record.d1 === 255 || record.d1 === 999) {
      record.d1 = null;
    }

    if (record.d2 === 255 || record.d2 === 999) {
      record.d2 = null;
    }

    yield record;
  }
}
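
// Generator over the records of a version 2 CSV body. The metadata line is
// skipped (from_line: 2), each column is cast to int, float or string based
// on its header name, and every record is mapped back to the version 1 field
// names (date, time, latitude, ..., d1, d2, flag, private) for storage.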
function* parseObsver2(body) {
  for (const record of csvParse(body, {
    from_line: 2,
    trim: true,
    columns: true,
    skip_empty_lines: true,
    delimiter: ';',
    encoding: 'utf8',
    relax_column_count: true,
    cast(value, context) {
      if (value === '') {
        return null;
      }

      let type;
      switch (context.column) {
        case 'Millis':
        case 'Left':
        case 'Right':
        case 'Confirmed':
        case 'Invalid':
        case 'InsidePrivacyArea':
        case 'Measurements':
        case 'Satellites':
          type = 'int';
          break;

        case 'Date':
        case 'Time':
        case 'Comment':
        case 'Marked':
          type = 'string';
          break;

        case 'Latitude':
        case 'Longitude':
        case 'Altitude':
        case 'Course':
        case 'Speed':
        case 'HDOP':
        case 'BatteryLevel':
        case 'Factor':
          type = 'float';
          break;

        default:
          type = /^(Tms|Lus|Rus)/.test(context.column) ? 'int' : 'string';
      }

      switch (type) {
        case 'int':
          return _parseInt(value);

        case 'float':
          return _parseFloat(value);

        case 'string':
          return _parseString(value);
      }
    },
  })) {
    // We convert the new format back to the old format for storage here, until
    // we upgrade the storage format as well to include all data. But we'll
    // have to upgrade the obsApp first.
    yield {
      date: record.Date,
      time: record.Time,
      latitude: record.Latitude,
      longitude: record.Longitude,
      course: record.Course,
      speed: record.Speed,
      d1: record.Left,
      d2: record.Right,
      flag: Boolean(record.Confirmed),
      private: Boolean(record.InsidePrivacyArea),
    };
  }
}

module.exports = { addPointsToTrack, detectFormat, parseObsver1, parseObsver2 };
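
// Minimal usage sketch with made-up sample data (not part of the module API).
// It only runs when this file is executed directly (require.main check), not
// when it is required as a module.
if (require.main === module) {
  const sampleBody = [
    'Date;Time;Latitude;Longitude;Course;Speed;Right;Left;Confirmed;insidePrivacyArea',
    '01.01.2020;12:00:00;48.1234;11.5678;0.0;15.5;255;120;1;0',
  ].join('\n');

  console.log('detected format:', detectFormat(sampleBody)); // 1
  for (const point of parseObsver1(sampleBody)) {
    console.log(point);
  }
}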