Struct renforce::trainer::QLearner
[−]
[src]
pub struct QLearner<A: FiniteSpace> { /* fields omitted */ }
Represents an OnlineTrainer for Q-functions. Uses the Q-learning algorithm.
Methods
impl<A: FiniteSpace> QLearner<A>
[src]
fn new(action_space: A,
gamma: f64,
alpha: f64,
train_period: TimePeriod)
-> QLearner<A>
Returns a new QLearner with the given info
fn default(action_space: A) -> QLearner<A>
Creates a new QLearner with default gamma, alpha, and train_period
fn gamma(self, gamma: f64) -> QLearner<A>
Sets gamma field of self
fn alpha(self, alpha: f64) -> QLearner<A>
Sets alpha field of self
fn train_period(self, train_period: TimePeriod) -> QLearner<A>
Sets train_period field of self
Trait Implementations
impl<A: Debug + FiniteSpace> Debug for QLearner<A>
[src]
impl<T, S: Space, A: FiniteSpace> OnlineTrainer<S, A, T> for QLearner<A> where T: QFunction<S, A> + Agent<S, A>
[src]
fn train_step(&mut self, agent: &mut T, transition: Transition<S, A>)
Performs one training iteration using the given transition
fn train(&mut self, agent: &mut T, env: &mut Environment<State=S, Action=A>)
Automatically trains the agent to perform well in the environment