mirror of https://github.com/darkrenaissance/darkfi.git
[research/lotterysim] write log every 100th step of running time
@@ -25,7 +25,7 @@ REWARD_MAX = 1000
 # slot length in seconds
 SLOT = 90
 # epoch length in slots
-EPOCH_LENGTH = 10
+EPOCH_LENGTH = 100
 # one month in slots
 ONE_MONTH = 60*60*24*30/SLOT
 # one year in slots
@@ -53,9 +53,9 @@ EPSILON = 1
 # window of accuracy calculation
 ACC_WINDOW = int(EPOCH_LENGTH)*10
 # headstart airdrop period
-HEADSTART_AIRDROP = 500
+HEADSTART_AIRDROP = ONE_MONTH
 # threshold of randomly slashing stakeholder
-SLASHING_RATIO = 0.001
+SLASHING_RATIO = 0.00001
 # number of nodes
 NODES = 1000
 # headstart value
@@ -69,4 +69,4 @@ REWARD_MIN_HP = Num(REWARD_MIN)
 REWARD_MAX_HP = Num(REWARD_MAX)
 BASE_L_HP = Num(BASE_L)
 CC_DIFF_EPSILON=0.0001
-MIL_SLOT = 500
+MIL_SLOT = 1000
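For scale, the new constants work out as follows (a quick arithmetic check, not part of the commit):

    # SLOT = 90 s, so:
    # one epoch = EPOCH_LENGTH * SLOT = 100 * 90 s = 2.5 h  (was 15 min at 10 slots)
    # ONE_MONTH = 60*60*24*30 / SLOT = 2592000 / 90 = 28800 slots
    # HEADSTART_AIRDROP thus grows from 500 slots (12.5 h) to 28800 slots (30 days)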
@@ -48,7 +48,6 @@ class DarkfiTable:
         rand_running_time = random.randint(1,self.running_time) if rand_running_time else self.running_time
         self.running_time = rand_running_time
         rt_range = tqdm(np.arange(0,self.running_time, 1))
-
         # loop through slots
         for slot in rt_range:
             # calculate probability of winning owning 100% of stake
@@ -85,6 +84,12 @@ class DarkfiTable:
             rt_range.set_description('epoch: {}, fork: {}, winners: {}, issuance {} DRK, f: {}, acc: {}%, stake: {}%, sr: {}%, reward:{}, apr: {}%, basefee: {}, avg(fee): {}, cc_diff: {}, avg(y): {}, avg(T): {}'.format(int(slot/EPOCH_LENGTH), self.merge_length(), self.winners[-1], round(self.Sigma,2), round(f, 5), round(self.secondary_pid.acc()*100, 2), round(total_stake/self.Sigma*100 if self.Sigma>0 else 0,2), round(self.avg_stake_ratio()*100,2) , round(self.rewards[-1],2), round(self.avg_apr()*100,2), round(base_fee, 5), round(avg_tip, 2), round(cc_diff, 5), round(float(avg_y), 2), round(float(avg_t), 2)))
             #assert round(total_stake,1) <= round(self.Sigma,1), 'stake: {}, sigma: {}'.format(total_stake, self.Sigma)
             slot+=1
+            step = int(self.running_time/100)
+            if slot%step == 0 and slot>0:
+                self.end_time=time.time()
+                self.write()
+                self.start_time=time.time()
+
         self.end_time=time.time()
         avg_reward = sum(self.rewards)/len(self.rewards)
         stake_ratio = self.avg_stake_ratio()
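The new block flushes the log roughly every 1% of the configured running time and resets the timing window after each write. A minimal standalone sketch of the same pattern (hypothetical names; note that int(running_time/100) is 0 whenever running_time < 100, so the sketch clamps the step to at least 1 to avoid a modulo-by-zero):

    import time

    def run_with_periodic_log(running_time, write_log):
        # flush accumulated stats roughly every 1% of the total run
        step = max(int(running_time / 100), 1)
        start_time = time.time()
        for slot in range(running_time):
            # ... per-slot simulation work ...
            if slot % step == 0 and slot > 0:
                end_time = time.time()
                write_log()  # persist the log collected so far
                print('{} slots took {:.2f}s'.format(step, end_time - start_time))
                start_time = time.time()  # open the next timing window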
@@ -140,6 +140,7 @@ class RewardApr(Tip):
 
     def get_tip(self, last_reward, apr, size, last_tip):
         apr_relu = max(apr, 0)
+        apr_relu = min(apr_relu, 1)
         return last_reward*apr_relu
 
 class TenthRewardApr(Tip):
@@ -149,6 +150,7 @@ class TenthRewardApr(Tip):
 
     def get_tip(self, last_reward, apr, size, last_tip):
         apr_relu = max(apr, 0)
+        apr_relu = min(apr_relu, 1)
         return last_reward*apr_relu/10
 
 
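Both hunks apply the same one-line fix: the APR multiplier is now clamped into [0, 1] instead of only floored at 0, so the tip can never exceed the reward it is derived from. The clamp in isolation (hypothetical helper name, for illustration only):

    def clamp_unit(apr):
        # floor at 0 (no tip on negative APR), cap at 1 (tip never exceeds the reward)
        return min(max(apr, 0), 1)

    # clamp_unit(-0.3) == 0, clamp_unit(0.42) == 0.42, clamp_unit(1.7) == 1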
@@ -194,4 +196,4 @@ class Generous(Tip):
 
 
 def random_tip_strategy():
-    return random.choice([ZeroTip(), HundredthOfReward(), MilthOfReward(), RewardApr(), TenthCCApr(), HundredthCCApr(), MilthCCApr(), Conservative(), Generous()])
+    return random.choice([ZeroTip(), MilthOfReward(), MilthCCApr(), Conservative(), Generous()])
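random.choice draws uniformly, so narrowing the pool from nine strategies to five raises each remaining strategy's expected share of simulated nodes from 1/9 to 1/5. A quick check of the resulting mix (illustrative snippet, not part of the commit):

    import random
    from collections import Counter

    names = ['ZeroTip', 'MilthOfReward', 'MilthCCApr', 'Conservative', 'Generous']
    # with five entries each name appears ~20% of the time
    print(Counter(random.choice(names) for _ in range(10000)))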
@@ -23,7 +23,7 @@ legends = []
 for darkie in darkies:
     legend = ["darkie{}".format(darkie[3])]
     legends +=[legend]
-plt.legend(legends, loc='upper left')
+#plt.legend(legends, loc='upper left')
 plt.savefig("log/plot_darkies_is.png")
 plt.close()