Improve the FETALinear documentation
timokau committed May 12, 2020
1 parent 3296b67 commit 9450337
Showing 1 changed file with 81 additions and 1 deletion.
82 changes: 81 additions & 1 deletion csrank/core/feta_linear.py
@@ -13,6 +13,20 @@


class FETALinearCore(Learner):
"""Core Learner implementing the First Evaluate then Aggregate approach.
This implements the FETA approach introduced in [PfGuH18]. The idea is to
first evaluate each object in each sub-contxt of fixed size and then to
aggregate these evaluations.
References
----------
.. [PfGuH18] Pfannschmidt, K., Gupta, P., & Hüllermeier, E. (2018). Deep
architectures for learning context-dependent ranking functions. arXiv
preprint arXiv:1803.05796. https://arxiv.org/pdf/1803.05796.pdf
"""

def __init__(
self,
learning_rate=1e-3,
@@ -23,6 +37,29 @@ def __init__(
random_state=None,
**kwargs
):
"""
Parameters
----------
learning_rate : float
The learning rate used by the gradient descent optimizer.
batch_size : int
The size of the mini-batches used to train the Neural Network.
loss_function
The loss function to minimize when training the Neural Network. See
the functions offered in the keras.losses module for more details.
epochs_drop: int
The amount of training epochs after which the learning rate is
decreased by a factor of `drop`.
drop: float
The factor by which to decrease the learning rate every
`epochs_drop` epochs.
random_state: np.RandomState
The random state to use in this object.
Returns
-------
model : pymc3 Model :class:`pm.Model`
"""
self.learning_rate = learning_rate
self.batch_size = batch_size
self.random_state = random_state
@@ -93,6 +130,18 @@ def _construct_model_(self, n_objects, n_object_features):
)

def step_decay(self, epoch):
"""Update the current learning rate.
Computes the current learning rate based on the initial learning rate,
the current epoch and the decay speed set by the `epochs_drop` and
`drop` hyperparameters.
Parameters
----------
epoch: int
The current epoch.
"""
step = math.floor((1 + epoch) / self.epochs_drop)
self.current_lr = self.learning_rate * math.pow(self.drop, step)
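# Worked example: with learning_rate=1e-3, drop=0.1 and epochs_drop=300
# (the defaults used by set_tunable_parameters), epoch 0 yields step=0 and
# lr=1e-3, epoch 299 yields step=1 and lr=1e-4, and epoch 599 yields step=2
# and lr=1e-5.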
self.optimizer = tf.train.GradientDescentOptimizer(self.current_lr).minimize(
@@ -102,6 +151,22 @@ def step_decay(self, epoch):
def fit(
self, X, Y, epochs=10, callbacks=None, validation_split=0.1, verbose=0, **kwd
):
"""
Fit the preference learning algorithm on the provided set of queries X
and the corresponding preferences Y over those objects. Both queries and
preferences are fixed-size numpy arrays.
Parameters
----------
X : array-like, shape (n_samples, n_objects, n_features)
Feature vectors of the objects
Y : array-like, shape (n_samples, n_objects)
Preferences of the objects in form of rankings or choices
epochs : int
The number of epochs to train for. The training loop will try to
predict the target variables and adjust its parameters by gradient
descent `epochs` times.
"""
self.random_state_ = check_random_state(self.random_state)
# Global Variables Initializer
n_instances, n_objects, n_features = X.shape
@@ -149,6 +214,18 @@ def _fit_(self, X, Y, epochs, n_instances, tf_session, verbose):
self.logger.info("Epoch {}: cost {} ".format((epoch + 1), np.mean(c)))

def _predict_scores_fixed(self, X, **kwargs):
"""Predict the scores for a given collection of sets of objects of same size.
Parameters
----------
X : array-like, shape (n_samples, n_objects, n_features)
Returns
-------
Y : array-like, shape (n_samples, n_objects)
Returns the scores of each of the objects for each of the samples.
"""
n_instances, n_objects, n_features = X.shape
outputs = [list() for _ in range(n_objects)]
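# Every pair of objects forms a sub-context of size two; the pairwise
# evaluations gathered per object in `outputs` are aggregated into the
# final scores, mirroring the FETA idea described in the class docstring.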
for i, j in combinations(range(n_objects), 2):
@@ -170,7 +247,10 @@ def set_tunable_parameters(
self, learning_rate=1e-3, batch_size=128, epochs_drop=300, drop=0.1, **point
):
"""
-Set tunable parameters of the FETA-network to the values provided.
+Set tunable hyperparameters of the FETA-network to the values provided.
This can be used for automatic hyperparameter optimization. See
csrank.tuning for more information.
Parameters
----------
