Commit
1 parent 7782314, commit 626dc98. Showing 27 changed files with 659 additions and 77 deletions.
File renamed without changes.
Empty file.
Empty file.
@@ -0,0 +1,57 @@
from pyvrml.gitlab.gitlab import gitlab_auth
from pyvrml.http.httputils import get

git_token = "token"

# Monitor pipeline status for GitLab projects
if __name__ == '__main__':
    # Log in to GitLab
    gl = gitlab_auth()

    projects_name = ['test']
    for project_name in projects_name:

        projects = gl.projects.list(search=project_name)
        for project in projects:
            print("[project]: " + str(project))
            search_project_name = project.name
            if search_project_name != project_name:
                # Skip projects whose name is not an exact match
                continue

            pipelines = project.pipelines.list(page=1, per_page=1)
            for pipeline in pipelines:
                # print("[pipeline1]: " + str(pipeline))

                pipeline = project.pipelines.get(pipeline.id)
                print("[pipeline]: " + str(pipeline))

                user_name = pipeline.user.get('name')
                web_url = pipeline.web_url
                status = pipeline.status
                # The pipeline did not pass
                if status != "success":
                    print(f"[{project_name}] pipeline health check failed!!!")

                # The pipeline passed
                else:
                    commit_id = pipeline.sha
                    test_res = get(
                        url=f"http://git.com/group/{project_name}/commit/{commit_id}/pipeline_reports.json?type=test",
                        params=None,
                        headers={'PRIVATE-TOKEN': git_token})
                    print("[test]: " + str(test_res))

                    if test_res:
                        summary = test_res.get("summary")
                        if summary:
                            total = summary.get("total")
                            resolved = summary.get("resolved")
                            failed = summary.get("failed")
                            error = summary.get("error")
                            if failed > 0 or error > 0:
                                print(f"[{project_name}] test health check failed!!!")
                            else:
                                print(f"[{project_name}] test health check passed~~~")
                    else:
                        print(f"[{project_name}] failed to fetch test health check report!!!")
Empty file.
Empty file.
@@ -0,0 +1,220 @@
import matplotlib
import numpy as np
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import NumpyReader
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model

matplotlib.use("agg")

import matplotlib.pyplot as plt


class _LSTMModel(ts_model.SequentialTimeSeriesModel):
    """A time series model-building example using an RNNCell."""

    def __init__(self, num_units, num_features, dtype=tf.float32):
        """Initialize/configure the model object.
        Note that we do not start graph building here. Rather, this object is a
        configurable factory for TensorFlow graphs which are run by an Estimator.
        Args:
          num_units: The number of units in the model's LSTMCell.
          num_features: The dimensionality of the time series (features per
            timestep).
          dtype: The floating point data type to use.
        """
        super(_LSTMModel, self).__init__(
            # Pre-register the metrics we'll be outputting (just a mean here).
            train_output_names=["mean"],
            predict_output_names=["mean"],
            num_features=num_features,
            dtype=dtype)
        self._num_units = num_units
        # Filled in by initialize_graph()
        self._lstm_cell = None
        self._lstm_cell_run = None
        self._predict_from_lstm_output = None

    def initialize_graph(self, input_statistics):
        """Save templates for components, which can then be used repeatedly.
        This method is called every time a new graph is created. It's safe to start
        adding ops to the current default graph here, but the graph should be
        constructed from scratch.
        Args:
          input_statistics: A math_utils.InputStatistics object.
        """
        super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
        self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
        # Create templates so we don't have to worry about variable reuse.
        self._lstm_cell_run = tf.make_template(
            name_="lstm_cell",
            func_=self._lstm_cell,
            create_scope_now_=True)
        # Transforms LSTM output into mean predictions.
        self._predict_from_lstm_output = tf.make_template(
            name_="predict_from_lstm_output",
            func_=lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
            create_scope_now_=True)

    def get_start_state(self):
        """Return initial state for the time series model."""
        return (
            # Keeps track of the time associated with this state for error checking.
            tf.zeros([], dtype=tf.int64),
            # The previous observation or prediction.
            tf.zeros([self.num_features], dtype=self.dtype),
            # The state of the RNNCell (batch dimension removed since this parent
            # class will broadcast).
            [tf.squeeze(state_element, axis=0)
             for state_element
             in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])

    def _transform(self, data):
        """Normalize data based on input statistics to encourage stable training."""
        mean, variance = self._input_statistics.overall_feature_moments
        return (data - mean) / variance

    def _de_transform(self, data):
        """Transform data back to the input scale."""
        mean, variance = self._input_statistics.overall_feature_moments
        return data * variance + mean

    def _filtering_step(self, current_times, current_values, state, predictions):
        """Update model state based on observations.
        Note that we don't do much here aside from computing a loss. In this case
        it's easier to update the RNN state in _prediction_step, since that covers
        running the RNN both on observations (from this method) and our own
        predictions. This distinction can be important for probabilistic models,
        where repeatedly predicting without filtering should lead to low-confidence
        predictions.
        Args:
          current_times: A [batch size] integer Tensor.
          current_values: A [batch size, self.num_features] floating point Tensor
            with new observations.
          state: The model's state tuple.
          predictions: The output of the previous `_prediction_step`.
        Returns:
          A tuple of new state and a predictions dictionary updated to include a
          loss (note that we could also return other measures of goodness of fit,
          although only "loss" will be optimized).
        """
        state_from_time, prediction, lstm_state = state
        with tf.control_dependencies(
                [tf.assert_equal(current_times, state_from_time)]):
            transformed_values = self._transform(current_values)
            # Use mean squared error across features for the loss.
            predictions["loss"] = tf.reduce_mean(
                (prediction - transformed_values) ** 2, axis=-1)
            # Keep track of the new observation in model state. It won't be run
            # through the LSTM until the next _imputation_step.
            new_state_tuple = (current_times, transformed_values, lstm_state)
        return (new_state_tuple, predictions)

    def _prediction_step(self, current_times, state):
        """Advance the RNN state using a previous observation or prediction."""
        _, previous_observation_or_prediction, lstm_state = state
        lstm_output, new_lstm_state = self._lstm_cell_run(
            inputs=previous_observation_or_prediction, state=lstm_state)
        next_prediction = self._predict_from_lstm_output(lstm_output)
        new_state_tuple = (current_times, next_prediction, new_lstm_state)
        return new_state_tuple, {"mean": self._de_transform(next_prediction)}

    def _imputation_step(self, current_times, state):
        """Advance model state across a gap."""
        # Does not do anything special if we're jumping across a gap. More advanced
        # models, especially probabilistic ones, would want a special case that
        # depends on the gap size.
        return state

    def _exogenous_input_step(
            self, current_times, current_exogenous_regressors, state):
        """Update model state based on exogenous regressors."""
        raise NotImplementedError(
            "Exogenous inputs are not implemented for this example.")


def prophet_lstm_values(values_y: list, save_img=False, pic_name='predict_result.png', window_size=100):
    """
    Forecast a one-dimensional sequence of values with the LSTM model.
    :param values_y: sequence of values
    :param save_img: whether to save the plot as an image; if False, the plt object is returned
    :param pic_name: file name used when saving
    :param window_size: minimum training window size
    :return: the plt object if the image was not saved
    """
    times_x = [i for i in range(len(values_y))]
    return prophet_lstm(times_x, values_y, save_img=save_img, pic_name=pic_name, window_size=window_size)


def prophet_lstm(times_x: list, values_y: list, save_img=False, pic_name='predict_result.png', window_size=100):
    """
    Forecast a two-dimensional (time, value) series with the LSTM model.
    :param times_x: sequence of timestamps
    :param values_y: sequence of values
    :param save_img: whether to save the plot as an image; if False, the plt object is returned
    :param pic_name: file name used when saving
    :param window_size: minimum training window size
    :return: the plt object if the image was not saved
    """
    times_x = np.array(times_x)
    values_y = np.array(values_y)
    tf.logging.set_verbosity(tf.logging.INFO)

    data = {
        tf.contrib.timeseries.TrainEvalFeatures.TIMES: times_x,
        tf.contrib.timeseries.TrainEvalFeatures.VALUES: values_y,
    }

    reader = NumpyReader(data)

    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(reader,
                                                               batch_size=4,
                                                               window_size=window_size)

    estimator = ts_estimators.TimeSeriesRegressor(model=_LSTMModel(num_features=1,
                                                                   num_units=128),
                                                  optimizer=tf.train.AdamOptimizer(0.001))
    estimator.train(input_fn=train_input_fn,
                    steps=2000)

    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn,
                                    steps=1)

    # Predict starting after the evaluation
    (predictions,) = tuple(estimator.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(evaluation,
                                                                     steps=200)))

    observed_times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    evaluated_times = evaluation["times"][0]
    evaluated = evaluation["mean"][0]
    predicted_times = predictions['times']
    predicted = predictions["mean"]

    plt.figure(figsize=(15, 5))
    # Dotted vertical marker at x=999 (hard-coded boundary between history and prediction).
    plt.axvline(999,
                linestyle="dotted",
                linewidth=4,
                color='r')
    observed_lines = plt.plot(observed_times,
                              observed,
                              label="observation",
                              color="k")
    evaluated_lines = plt.plot(evaluated_times,
                               evaluated,
                               label="evaluation",
                               color="g")
    predicted_lines = plt.plot(predicted_times,
                               predicted,
                               label="prediction",
                               color="r")
    plt.legend(handles=[observed_lines[0],
                        evaluated_lines[0],
                        predicted_lines[0]],
               loc="upper left")

    if save_img:
        plt.savefig(pic_name)
    return plt
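A minimal usage sketch of the two helpers added above, assuming the module is importable as written; the synthetic sine series, noise level, and output file name are illustrative assumptions, not part of the commit.

import numpy as np

# 1000 noisy sine samples stand in for a real metric series.
values = (np.sin(np.linspace(0, 50, 1000)) + np.random.normal(0, 0.1, 1000)).tolist()

# prophet_lstm_values builds the time axis itself (0..len-1) and delegates to prophet_lstm;
# with save_img=True the resulting plot is also written to disk.
plt = prophet_lstm_values(values, save_img=True, pic_name='sine_forecast.png', window_size=100)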
File renamed without changes.
This file was deleted.