Compare commits


1 Commit
ucb ... master

Author SHA1 Message Date
a583855e47 fix random seeds with np.random.RandomState() 2023-10-29 23:18:48 +08:00
7 changed files with 20 additions and 73 deletions
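The pattern this commit applies, in brief: every component that needs randomness owns an np.random.RandomState seeded at construction, instead of calling the global np.random functions. A minimal sketch of the idea (illustrative only, not code from this repository):

import numpy as np

# Before: global draws, different on every run
# error = np.random.uniform(0.5, 2.0, (1, 3))

# After: a per-component generator pinned by a seed
rng = np.random.RandomState(seed=0)
error = rng.uniform(0.5, 1.5, (1, 3))   # identical values on every run
recalled = rng.random_sample() < 0.9    # reproducible Bernoulli draw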

View File

@@ -1,2 +1 @@
 from .util import create_agent_helper
-from .ucb_agent import UCBAgent

View File

@@ -1,59 +0,0 @@
-from recsim.agent import AbstractEpisodicRecommenderAgent
-import tensorflow as tf
-import numpy as np
-
-
-class UCBAgent(AbstractEpisodicRecommenderAgent):
-    def __init__(self, sess, observation_space, action_space, eval_mode, alpha=1.0, learning_rate=0.001, summary_writer=None):
-        super(UCBAgent, self).__init__(action_space, summary_writer)
-        self._num_candidates = int(action_space.nvec[0])
-        self._W = tf.Variable(np.random.uniform(0, 10, size=(self._num_candidates, 3)), name='W')
-        self._sess = sess
-        self._return_idx = None
-        self._prev_pred_pr = None
-        self._opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
-        self._alpha = alpha
-        assert self._slate_size == 1
-
-    def step(self, reward, observation):
-        docs = observation['doc']
-        user = observation['user']
-        response = observation['response']
-        if self._return_idx != None and response != None:
-            # update w
-            y_true = [response[0]['recall']]
-            y_pred = self._prev_pred_pr
-            loss = tf.losses.binary_crossentropy(y_true, y_pred)
-            self._sess.run(self._opt.minimize(loss))
-        base_pr = self.calc_prs(user['time'], user['last_review'], user['history'], self._W)
-        time = user['time'] + 1
-        history_pos = user['history'].copy()
-        history_pos[:, [0, 1]] += 1  # add n, n+ by 1
-        history_neg = user['history'].copy()
-        history_neg[:, [0, 2]] += 1  # add n, n- by 1
-        last_review_now = np.repeat(user['time'], len(user['last_review']))
-        pr_pos = self.calc_prs(time, last_review_now, history_pos, self._W)
-        pr_neg = self.calc_prs(time, last_review_now, history_neg, self._W)
-        gain = (pr_pos + pr_neg) / 2 - base_pr
-        time_since_last_review = user['time'] - user['last_review']
-        uncertainty = self._alpha * tf.math.sqrt(tf.math.log(time_since_last_review) / user['history'][:, 0])
-        # print(gain.eval(session=self._sess))
-        # print(time_since_last_review)
-        # print(uncertainty.eval(session=self._sess))
-        ucb_score = gain + uncertainty
-        print("       gain:", gain.eval(session=self._sess))
-        print("uncertainty:", uncertainty.eval(session=self._sess))
-        best_idx = tf.argmax(ucb_score)
-        self._return_idx = self._sess.run(best_idx)
-        self._prev_pred_pr = base_pr[self._return_idx]
-        return [self._return_idx]
-
-    def calc_prs(self, train_time, last_review, history, W):
-        last_review = train_time - last_review
-        mem_param = tf.math.exp(tf.reduce_sum(history * W, axis=1))
-        pr = tf.math.exp(-last_review / mem_param)
-        return pr

View File

@@ -2,13 +2,13 @@ from .FlashcardDocument import FlashcardDocument
 from recsim import document
 
 class FlashcardDocumentSampler(document.AbstractDocumentSampler):
-    def __init__(self, doc_ctor=FlashcardDocument, **kwargs):
-        super(FlashcardDocumentSampler, self).__init__(doc_ctor, **kwargs)
+    def __init__(self, doc_ctor=FlashcardDocument, seed=0, **kwargs):
+        super(FlashcardDocumentSampler, self).__init__(doc_ctor, seed, **kwargs)
         self._doc_count = 0
 
     def sample_document(self):
         doc_features = {}
         doc_features['doc_id'] = self._doc_count
-        doc_features['difficulty'] = self._rng.uniform(0, 5, (1, 3))
+        doc_features['difficulty'] = self._rng.uniform(0, 3, (1, 3))
         self._doc_count += 1
         return self._doc_ctor(**doc_features)
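For context: recsim's AbstractDocumentSampler keeps an internal self._rng that it seeds from its seed argument, which is why forwarding seed through super().__init__ is enough to make the difficulty draws deterministic. A small standalone check of that property, using only NumPy:

import numpy as np

rng_a = np.random.RandomState(0)
rng_b = np.random.RandomState(0)
# Independently constructed generators with equal seeds yield equal
# difficulty vectors, so two samplers built with seed=0 agree.
assert np.allclose(rng_a.uniform(0, 3, (1, 3)), rng_b.uniform(0, 3, (1, 3)))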

View File

@@ -17,8 +17,8 @@ tf.compat.v1.disable_eager_execution()
 create_agent_fn = create_agent_helper(full_slate_q_agent.FullSlateQAgent)
 
 ltsenv = environment.Environment(
-    FlashcardUserModel(num_candidates, time_budget, slate_size),
-    FlashcardDocumentSampler(),
+    FlashcardUserModel(num_candidates, time_budget, slate_size, seed=0, sample_seed=0),
+    FlashcardDocumentSampler(seed=0),
    num_candidates,
    slate_size,
    resample_documents=False)
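With both seeds pinned at the construction site, rebuilding the environment reproduces the same deck and the same simulated user. A sketch of that idea, assuming recsim's samplers derive all randomness from these seeds (the helper name make_env is illustrative):

def make_env():
    # Same wiring as above; every source of randomness is seeded.
    return environment.Environment(
        FlashcardUserModel(num_candidates, time_budget, slate_size, seed=0, sample_seed=0),
        FlashcardDocumentSampler(seed=0),
        num_candidates,
        slate_size,
        resample_documents=False)

env_a, env_b = make_env(), make_env()  # interchangeable, run after run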

View File

@@ -7,13 +7,14 @@ from util import eval_result
 import numpy as np
 
 class FlashcardUserModel(user.AbstractUserModel):
-    def __init__(self, num_candidates, time_budget, slate_size, seed=0):
+    def __init__(self, num_candidates, time_budget, slate_size, seed=0, sample_seed=0):
         super(FlashcardUserModel, self).__init__(
             UserResponse, UserSampler(
                 UserState, num_candidates, time_budget,
-                seed=seed
+                seed=sample_seed
             ), slate_size)
         self.choice_model = MultinomialLogitChoiceModel({})
+        self._rng = np.random.RandomState(seed)
 
     def is_terminal(self):
         terminated = self._user_state._time > self._user_state._time_budget
@@ -52,7 +53,8 @@ class FlashcardUserModel(user.AbstractUserModel):
         doc_id = doc._doc_id
         W = self._user_state._W[doc_id]
         if not W.any():  # uninitialized
-            self._user_state._W[doc_id] = W = doc.base_difficulty * np.random.uniform(0.5, 2.0, (1, 3))  # a uniform error for each user
+            error = self._user_state._doc_error[doc_id]  # a uniform error for each user
+            self._user_state._W[doc_id] = W = doc.base_difficulty * error
             print(W)
         # use exponential function to simulate whether the user recalls
         last_review = self._user_state._time - self._user_state._last_review[doc_id]
@@ -60,6 +62,6 @@ class FlashcardUserModel(user.AbstractUserModel):
         pr = np.exp(-last_review / np.exp(np.dot(W, x))).squeeze()
         print(f"time: {self._user_state._time}, reviewing flashcard {doc_id}, recall rate = {pr}")
-        if np.random.rand() < pr:  # remembered
+        if self._rng.random_sample() < pr:  # remembered
             response._recall = True
             response._pr = pr
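The recall draw itself is the part that moves off the global generator: np.random.rand() is replaced by the model-owned self._rng.random_sample(). A self-contained sketch of why that matters (the function name simulate_recalls is illustrative, not from the repo):

import numpy as np

def simulate_recalls(seed, prs):
    rng = np.random.RandomState(seed)  # per-model generator, as in the diff
    return [rng.random_sample() < pr for pr in prs]

# Equal seeds replay the exact same recall outcomes; the old
# np.random.rand() depended on whatever else touched the global state.
assert simulate_recalls(0, [0.9, 0.5, 0.1]) == simulate_recalls(0, [0.9, 0.5, 0.1])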

View File

@@ -7,9 +7,13 @@ class UserSampler(user.AbstractUserSampler):
                  num_candidates=10,
                  time_budget=60,
                  **kwargs):
-        self._state_parameters = {'num_candidates': num_candidates, 'time_budget': time_budget}
         super(UserSampler, self).__init__(user_ctor, **kwargs)
+        doc_error = self._rng.uniform(0.5, 1.5, (num_candidates, 3))
+        self._state_parameters = {
+            'num_candidates': num_candidates,
+            'time_budget': time_budget,
+            'doc_error': doc_error
+        }
 
     def sample_user(self):
         return self._user_ctor(**self._state_parameters)
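Note the reordering: super().__init__ must run before the error matrix is drawn, because the seeded self._rng only exists once AbstractUserSampler's constructor has set it up. A reduced sketch of the resulting initialization (literals taken from the diff):

import numpy as np

rng = np.random.RandomState(0)              # stands in for the sampler's self._rng
doc_error = rng.uniform(0.5, 1.5, (10, 3))  # one multiplier triple per card, drawn once
state_parameters = {'num_candidates': 10, 'time_budget': 60, 'doc_error': doc_error}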

View File

@@ -3,13 +3,14 @@ import numpy as np
 from gym import spaces
 
 class UserState(user.AbstractUserState):
-    def __init__(self, num_candidates, time_budget):
+    def __init__(self, num_candidates, time_budget, doc_error):
         self._cards = num_candidates
         self._history = np.zeros((num_candidates, 3))
         self._last_review = np.repeat(-1.0, num_candidates)
         self._time_budget = time_budget
         self._time = 0
         self._W = np.zeros((num_candidates, 3))
+        self._doc_error = doc_error
         super(UserState, self).__init__()
 
     def create_observation(self):
         return {'history': self._history, 'last_review': self._last_review, 'time': self._time, 'time_budget': self._time_budget}