Struct renforce::trainer::SARSALearner
[−]
[src]
pub struct SARSALearner { /* fields omitted */ }
Represents an OnlineTrainer for Q-functions. Uses the SARSA algorithm.
Methods
impl SARSALearner
[src]
fn new(gamma: f64, alpha: f64, train_period: TimePeriod) -> SARSALearner
Returns a new SARSALearner configured with the given gamma, alpha, and train_period
fn gamma(self, gamma: f64) -> SARSALearner
Sets the gamma field of self, returning the modified learner
fn alpha(self, alpha: f64) -> SARSALearner
Sets the alpha field of self, returning the modified learner
fn train_period(self, train_period: TimePeriod) -> SARSALearner
Sets the train_period field of self, returning the modified learner
Trait Implementations
impl Debug for SARSALearner
[src]
impl<T, S: Space, A: Space> OnlineTrainer<S, A, T> for SARSALearner where T: QFunction<S, A> + Agent<S, A>
[src]
fn train_step(&mut self, agent: &mut T, transition: Transition<S, A>)
Performs one training iteration using the given transition
fn train(&mut self, agent: &mut T, env: &mut Environment<State=S, Action=A>)
Automatically trains the agent to perform well in the environment
impl Default for SARSALearner
[src]
fn default() -> SARSALearner
Creates a new SARSALearner with default values for gamma, alpha, and train_period