add feature
parent 77b6e0c14f
commit 639ae0cd8e
.gitignore (vendored): 4 lines changed

@@ -1 +1,3 @@
 .venv
+
+__pycache__
dat_2_pg.py: 20 lines changed

@@ -1,4 +1,5 @@
 import psycopg
+from datetime import datetime, timedelta
 
 from read_dat import read_binary_file
 
@@ -7,22 +8,23 @@ df = read_binary_file(file_path)
 
 # PostgreSQL connection configuration
 postgres_config = {
-    "host": "192.168.3.12",
-    "user": "prepare",
-    "password": "preparepassword",
-    "dbname": "preparedb",
+    "host": "111.231.71.81",
+    "user": "lzflood",
+    "password": "lzfloodpassword",
+    "dbname": "lzflooddb",
 }
 
 postgres_conn = psycopg.connect(**postgres_config)
 postgres_cursor = postgres_conn.cursor()
 
 for index, row in df.iterrows():
+    current_date = datetime(2010, 7, 20) + timedelta(hours=index)
     for i in range(len(row)):
         postgres_cursor.execute(
-            "INSERT INTO river_flow (river_id, created_at, runoff) VALUES (%s, %s, %s)",
-            (i + 1, index, row[i]),
+            "INSERT INTO river_runoff (river_id, created_at, value) VALUES (%s, %s, %s)",
+            (i + 1, current_date, row[i] / 86400),
         )
+        print("index, i", index, i)
     postgres_conn.commit()
 postgres_cursor.close()
 postgres_conn.close()
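The rewritten insert loop maps each DataFrame row index to an hourly timestamp starting at 2010-07-20 and divides every raw value by 86400, which reads like a conversion from a per-day volume to a per-second rate before it lands in river_runoff. Below is a minimal standalone sketch of the same write path, assuming the river_runoff (river_id, created_at, value) table shown in the diff; the connection settings are placeholders, and executemany is used only to batch the inserts rather than mirror the commit line for line.

from datetime import datetime, timedelta

import psycopg

# Placeholder connection settings, not the ones in the commit.
config = {"host": "localhost", "user": "user", "password": "secret", "dbname": "flood"}

def insert_runoff(df, start=datetime(2010, 7, 20)):
    # Row index -> hourly timestamp, column position -> river_id, raw value -> per-second rate.
    rows = []
    for index, row in df.iterrows():
        created_at = start + timedelta(hours=index)
        for i, raw in enumerate(row):
            rows.append((i + 1, created_at, raw / 86400))
    with psycopg.connect(**config) as conn:
        with conn.cursor() as cur:
            cur.executemany(
                "INSERT INTO river_runoff (river_id, created_at, value) VALUES (%s, %s, %s)",
                rows,
            )
        conn.commit()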
read_dat.py: 82 lines changed

@@ -1,3 +1,4 @@
+from datetime import datetime, timedelta
 import struct
 import pandas as pd
 
@@ -19,23 +20,31 @@ def read_binary_file(file_path):
 
         # Read the next 8 bytes, which encode the date
         date_bytes = file.read(8)
-        date = struct.unpack("<d", date_bytes)[0]
-        print(f"Date: {date}")
+        date_float = struct.unpack("<d", date_bytes)[0]
+        date_str = str(int(date_float))
+        year = int(date_str[:4])
+        month = int(date_str[4:6])
+        day = int(date_str[6:8])
+        # start_date = datetime(year, month, day)
+        # print(f"Date: {datetime(year, month, day)}")
 
         # All data for the first timestamp
-        time_bytes = file.read(8)
-        time = struct.unpack("<d", time_bytes)[0]
-        print(f"Time: {time}")
-        data_size = column_count_int * 8  # 8 bytes per data point
-        file.seek(data_size, 1)
-        time1_bytes = file.read(8)
-        time1 = struct.unpack("<d", time1_bytes)[0]
-        print(f"Time: {time1}")
-        file.seek(data_size, 1)
-        time2_bytes = file.read(8)
-        time2 = struct.unpack("<d", time2_bytes)[0]
-        print(f"Time: {time2}")
+        # time_bytes = file.read(8)
+        # time = struct.unpack("<d", time_bytes)[0]
+        # print(f"Time: {time}")
+        # hours = int(time / 60)
+        # first_date = start_date + timedelta(hours=hours)
+        # print(f"Minute: {minute}")
+        # data_size = column_count_int * 8  # 8 bytes per data point
+        # file.seek(data_size, 1)
+        # time1_bytes = file.read(8)
+        # time1 = struct.unpack("<d", time1_bytes)[0]
+        # print(f"Time: {time1}")
+        # file.seek(data_size, 1)
+        # time2_bytes = file.read(8)
+        # time2 = struct.unpack("<d", time2_bytes)[0]
+        # print(f"Time: {time2}")
 
         # for i in range(column_count_int):
         # print(i)
         # if i > 10:
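The new header parsing unpacks the 8-byte field as a little-endian double whose integer part is a YYYYMMDD stamp (e.g. 20100720.0). A small helper, sketched here only to illustrate that decoding step; the function name is not part of the commit.

import struct
from datetime import datetime

def decode_header_date(date_bytes: bytes) -> datetime:
    # Unpack a little-endian double such as 20100720.0 and split its digits into year/month/day.
    value = struct.unpack("<d", date_bytes)[0]
    digits = str(int(value))          # e.g. "20100720"
    return datetime(int(digits[:4]), int(digits[4:6]), int(digits[6:8]))

# decode_header_date(struct.pack("<d", 20100720.0)) -> datetime(2010, 7, 20)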
@@ -43,29 +52,30 @@ def read_binary_file(file_path):
         # data_bytes = file.read(8)
         # data = struct.unpack("<d", data_bytes)[0]
         # print(f"Data: {data}")
-        # while True:
-        # # print("index", index)
-        # if index > 335:
-        # break
-        # time_bytes = file.read(8)
-        # time = struct.unpack("<d", time_bytes)[0]
-        # # print(f"time: {time}")
+        while True:
+            # print("index", index)
+            if index > 335:
+                break
+            time_bytes = file.read(8)
+            time = struct.unpack("<d", time_bytes)[0]
+            # print(f"time: {time}")
 
-        # # Read the data matrix
-        # data_size = column_count_int * 8  # 8 bytes per data point
-        # data_bytes = file.read(data_size)
-        # data_matrix = struct.unpack(f"<{column_count_int}d", data_bytes)
-        # data_matrix = [data_matrix]
-        # new_df = pd.DataFrame(data_matrix)
-        # df = pd.concat([df, new_df], ignore_index=True)
-        # # print(f"Data Matrix: {data_matrix}")
-        # index = index + 1
-
+            # Read the data matrix
+            data_size = column_count_int * 8  # 8 bytes per data point
+            data_bytes = file.read(data_size)
+            data_matrix = struct.unpack(f"<{column_count_int}d", data_bytes)
+            data_matrix = [data_matrix]
+            new_df = pd.DataFrame(data_matrix)
+            df = pd.concat([df, new_df], ignore_index=True)
+            # print(f"Data Matrix: {data_matrix}")
+            index = index + 1
+    print("index", index)
     return df
 
 
-# Read the file using the function
-file_path = "lz.out/lz.rivqdown.dat"
-df = read_binary_file(file_path)
-print(df.shape)
-print(df.head())
+if __name__ == "__main__":
+    # Read the file using the function
+    file_path = "lz.out/lz.rivqdown.dat"
+    df = read_binary_file(file_path)
+    print(df.shape)
+    print(df.head())
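The loop uncommented in read_dat.py implies a simple record layout after the header: each timestep is one 8-byte time value followed by column_count_int little-endian doubles, one per column. A standalone sketch of that reading pattern under the same assumed layout; the function name and caller-supplied column count are mine, and the 336-step cap mirrors the index > 335 guard in the commit.

import struct
import pandas as pd

def read_records(file, column_count, max_steps=336):
    # Read (time, values) records of little-endian doubles into a DataFrame, one row per timestep.
    frames = []
    for _ in range(max_steps):
        time_bytes = file.read(8)                 # 8-byte timestamp for this step
        if len(time_bytes) < 8:
            break                                 # end of file
        struct.unpack("<d", time_bytes)           # time value, decoded but unused here
        data_bytes = file.read(column_count * 8)  # one double per column
        values = struct.unpack(f"<{column_count}d", data_bytes)
        frames.append(pd.DataFrame([values]))
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()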
Dependency list:

@@ -1,4 +1,2 @@
 pandas
-gdal
-geopandas
 psycopg[binary]