Skip to content

Commit

Permalink
fix influxdb method
Browse files Browse the repository at this point in the history
  • Loading branch information
m4dm4rtig4n committed Dec 5, 2022
1 parent 5b538e9 commit f6e28e7
Show file tree
Hide file tree
Showing 6 changed files with 29 additions and 14 deletions.
5 changes: 4 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,9 @@ Un template est disponible sur le repo [config.yaml](https://github.com/m4dm4rti
| discovery_prefix | Préfixe configuré dans Home Assistant pour l'auto-discovery | homeassistant |

### influxdb

> Version supportée minimum : 1.8

| Champs | Information | Défaut |
|------------------|---------------------------------------------------------------|------------------|
| enable | Activation ou non des exports vers InfluxDB | False |
Expand All @@ -123,7 +126,7 @@ Ces 2 propriétés vont vous permettre de jouer sur la rapidité d'importation d

#### Configuration par version :

##### v1.X :
##### v1.8 :
```yaml
influxdb:
enable: 'true'
Expand Down
11 changes: 8 additions & 3 deletions app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,10 +67,14 @@
INFLUXB_ENABLE = False
INFLUXDB = None

if "asynchronous" in INFLUXDB_CONFIG and str2bool(INFLUXDB_CONFIG["asynchronous"]):
write_options = "ASYNCHRONOUS"
if "method" in INFLUXDB_CONFIG:
method = INFLUXDB_CONFIG["method"]
else:
write_options = "SYNCHRONOUS"
method = "SYNCHRONOUS"

write_options = []
if "batching_options" in INFLUXDB_CONFIG:
write_options = INFLUXDB_CONFIG["batching_options"]

if INFLUXDB_CONFIG and "enable" in INFLUXDB_CONFIG and INFLUXDB_CONFIG["enable"]:
INFLUXB_ENABLE = True
Expand All @@ -80,6 +84,7 @@
token=INFLUXDB_CONFIG["token"],
org=INFLUXDB_CONFIG["org"],
bucket=INFLUXDB_CONFIG["bucket"],
method=method,
write_options=write_options
)
if CONFIG.get("wipe_influxdb"):
Expand Down
1 change: 0 additions & 1 deletion app/models/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -366,7 +366,6 @@ def set_usage_point(self, usage_point_id, data):
self.session.add(
UsagePoints(
usage_point_id=usage_point_id,
# TODO : Erreur si name est vide
name=name,
cache=str2bool(cache),
consumption=str2bool(consumption),
Expand Down
8 changes: 5 additions & 3 deletions app/models/influxdb.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

class InfluxDB:

def __init__(self, hostname, port, token, org="myelectricaldata.fr", bucket="myelectricaldata", method="batching",
def __init__(self, hostname, port, token, org="myelectricaldata.fr", bucket="myelectricaldata", method="SYNCHRONOUS",
write_options=None):
if write_options is None:
write_options = {}
Expand Down Expand Up @@ -66,8 +66,8 @@ def __init__(self, hostname, port, token, org="myelectricaldata.fr", bucket="mye
self.get_list_retention_policies()
if self.retention < 94608000:
day = int(self.retention / 60 / 60 / 24)
app.LOG.log([
f" => ATTENTION, l'InfluxDB est configuré avec une durée de retention de {day} jours.",
app.LOG.warning([
f"<!> ATTENTION, l'InfluxDB est configuré avec une durée de retention de {day} jours.",
f" Toutes les données supérieurs à {day} jours ne seront jamais inséré dans celui-ci."
])

Expand All @@ -93,7 +93,9 @@ def connect(self):
"https://github.com/m4dm4rtig4n/enedisgateway2mqtt#configuration-file"
])

app.LOG.log(f" => Methode d'importation : {self.method.upper()}")
if self.method.upper() == "ASYNCHRONOUS":
app.LOG.warning("<!> ATTENTION, le mode d'importation \"ASYNCHRONOUS\" est très consommateur de ressources système.")
self.write_api = self.influxdb.write_api(write_options=ASYNCHRONOUS)
elif self.method.upper() == "SYNCHRONOUS":
self.write_api = self.influxdb.write_api(write_options=SYNCHRONOUS)
Expand Down
2 changes: 0 additions & 2 deletions app/models/query_daily.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,6 @@ def run(self, begin, end):
app.LOG.log(f" => Chargement des données depuis MyElectricalData {begin_str} => {end_str}")
data = Query(endpoint=f"{self.url}/{endpoint}/", headers=self.headers).get()
blacklist = 0
from pprint import pprint
pprint(data.text)
if hasattr(data, "status_code"):
if data.status_code == 200:
meter_reading = json.loads(data.text)['meter_reading']
Expand Down
16 changes: 12 additions & 4 deletions app/models/query_detail.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,15 +112,23 @@ def run(self, begin, end):
result = is_between(dateHourMinute, (offpeak_begin, offpeak_stop))
if result:
measure_type = "HC"
bulk_insert.append(self.detail_table(
self.db.insert_detail(
usage_point_id=self.usage_point_id,
date=date,
value=value,
interval=interval,
measure_type=measure_type,
blacklist=0
))
self.db.insert_detail_bulk(bulk_insert, self.measure_type)
blacklist=0,
)
# bulk_insert.append(self.detail_table(
# usage_point_id=self.usage_point_id,
# date=date,
# value=value,
# interval=interval,
# measure_type=measure_type,
# blacklist=0
# ))
# self.db.insert_detail_bulk(bulk_insert, self.measure_type)
return meter_reading["interval_reading"]
else:
return {
Expand Down

0 comments on commit f6e28e7

Please sign in to comment.