[research/lotterysim] simulate timelocked airdrop, enhance log

This commit is contained in:
ertosns
2023-07-20 17:23:51 +03:00
parent ee644898c5
commit f0a4095833
5 changed files with 18 additions and 11 deletions

View File

@@ -41,4 +41,4 @@ BASE_L_HP = Num(BASE_L)
# HEADSTART AIRDROP period ~ 1 month
HEADSTART_AIRDROP=2880
SLASHING_RATIO = 0.0001
SLASHING_RATIO = 0.01

View File

@@ -27,13 +27,18 @@ class Darkie():
def vesting_wrapped_initial_stake(self):
#print('initial stake: {}, corresponding vesting: {}'.format(self.initial_stake[0], self.vesting[int((self.slot)/VESTING_PERIOD)]))
# note index is previous slot since update_vesting is called after background execution.
return self.current_vesting() if self.slot>0 else self.initial_stake[-1]
#return self.current_vesting() if self.slot>0 else self.initial_stake[-1]
return (self.current_vesting() if self.slot>0 else self.initial_stake[-1]) + self.initial_stake[-1]
def apr_scaled_to_runningtime(self):
initial_stake = self.vesting_wrapped_initial_stake()
#print('stake: {}, initial_stake: {}'.format(self.stake, initial_stake))
assert self.stake >= initial_stake, 'stake: {}, initial_stake: {}, slot: {}, current: {}, previous: {} vesting'.format(self.stake, initial_stake, self.slot, self.current_vesting(), self.prev_vesting())
return Num(self.stake - initial_stake) / Num(initial_stake) * Num(ONE_YEAR/(self.slot/EPOCH_LENGTH)) if self.slot> 0 and initial_stake>0 else 0
#if self.slot%100==0:
#print('stake: {}, initial stake: {}'.format(self.stake, initial_stake))
#print(self.initial_stake)
apr = Num(self.stake - initial_stake) / Num(initial_stake) * Num(ONE_YEAR/(self.slot/EPOCH_LENGTH)) if self.slot> 0 and initial_stake>0 else 0
return apr
def staked_tokens(self):
'''
@@ -63,7 +68,7 @@ class Darkie():
x = (Num(1) if hp else 1) - (Num(tune_parameter) if hp else tune_parameter)
c = (x.ln() if type(x)==Num else math.log(x))
sigmas = [ c/((self.Sigma+EPSILON)**i) * ( ((L_HP if hp else L)/fact(i)) ) for i in range(1, k+1) ]
scaled_target = approx_target_in_zk(sigmas, Num(stake)) + (BASE_L_HP if hp else BASE_L)
scaled_target = approx_target_in_zk(sigmas, Num(stake)) + ((BASE_L_HP if hp else BASE_L) if self.slot < HEADSTART_AIRDROP else 0)
return scaled_target
if self.slot % EPOCH_LENGTH ==0 and self.slot > 0:
@@ -71,7 +76,8 @@ class Darkie():
# staked ratio is added in strategy
self.strategy.set_ratio(self.slot, apr)
# epoch stake is added
self.initial_stake +=[self.stake]
if self.slot < HEADSTART_AIRDROP:
self.initial_stake +=[self.stake]
#if self.slot == HEADSTART_AIRDROP:
# self.initial_stake += [self.stake]
T = target(self.f, self.strategy.staked_value(self.stake))

View File

@@ -67,8 +67,8 @@ class DarkfiTable:
if random.random() < SLASHING_RATIO:
self.darkies.remove(self.darkies[i])
print('stakeholder {} slashed'.format(i))
break
self.darkies[i].update_stake(self.rewards[-1])
else:
self.darkies[i].update_stake(self.rewards[-1])
break
# resolve finalization
self.Sigma += self.rewards[-1]
@@ -93,7 +93,7 @@ class DarkfiTable:
break
self.darkies[darkie_winning_idx].resync_stake(resync_reward)
self.Sigma += resync_reward
rt_range.set_description('issuance {} DRK, acc: {}, stake = {}%, sr: {}%, reward:{}'.format(round(sum(self.rewards),2), round(acc,2), round(total_stake/self.Sigma*100 if self.Sigma>0 else 0,2), self.avg_stake_ratio()*100, self.rewards[-1]))
rt_range.set_description('epoch: {}, issuance {} DRK, acc: {}, stake = {}%, sr: {}%, reward:{}, apr: {}'.format(int(count/EPOCH_LENGTH), round(self.Sigma,2), round(acc,2), round(total_stake/self.Sigma*100 if self.Sigma>0 else 0,2), round(self.avg_stake_ratio()*100,2) , round(self.rewards[-1],2), round(self.avg_apr(),2)))
#print('[2]stake: {}, sigma: {}, reward: {}'.format(total_stake, self.Sigma, self.rewards[-1]))
assert(round(total_stake,1) <= round(self.Sigma,1))
count+=1

View File

@@ -14,7 +14,8 @@ NODES=1000
if __name__ == "__main__":
darkies = [Darkie(0, strategy=LinearStrategy(EPOCH_LENGTH)) for _ in range(NODES)]
dt = DarkfiTable(0, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-2.53, r_ki=29.5, r_kd=53.77)
#dt = DarkfiTable(0, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-2.53, r_ki=29.5, r_kd=53.77)
dt = DarkfiTable(0, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-0.719, r_ki=1.6, r_kd=0.1)
for darkie in darkies:
dt.add_darkie(darkie)
acc, avg_apy, avg_reward, stake_ratio, avg_apr = dt.background(rand_running_time=False)

View File

@@ -39,7 +39,7 @@ highest_gain = (KP_SEARCH, KI_SEARCH, KD_SEARCH)
parser = ArgumentParser()
parser.add_argument('-p', '--high-precision', action='store_false', default=False)
parser.add_argument('-r', '--randomizenodes', action='store_true', default=True)
parser.add_argument('-r', '--randomizenodes', action='store_false', default=True)
parser.add_argument('-t', '--rand-running-time', action='store_true', default=True)
parser.add_argument('-d', '--debug', action='store_false')
args = parser.parse_args()
@@ -49,7 +49,7 @@ rand_running_time = args.rand_running_time
debug = args.debug
def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, rkp=0, rki=0, rkd=0, distribution=[], hp=True):
dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=rkp, r_ki=rki, r_kd=rkd)
dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=rkp, r_ki=rki, r_kd=rkd)
RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
for idx in range(0,RND_NODES):
darkie = Darkie(distribution[idx], strategy=random_strategy(EPOCH_LENGTH))