
rebalance -> reallocate and implement lite version for user changes

master
forest 3 weeks ago
parent
commit
dfd386746d
6 changed files with 300 additions and 262 deletions
  1. .gitignore (+3 -0)
  2. backend.go (+274 -248)
  3. frontend/admin.gotemplate.html (+2 -2)
  4. frontend_admin_panel.go (+4 -4)
  5. frontend_profile.go (+8 -1)
  6. scheduled_tasks.go (+9 -7)
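
For orientation: the diff below replaces the single Rebalance() entry point with Reallocate(rebalanceVPSInstances, reshuffleTenants bool). The hourly scheduled task and the admin panel run the full pass with both flags on, while user-driven changes (tenant initialization, subdomain updates) run the new lite pass with both flags off. A minimal sketch of those two call modes, using a stub in place of the real BackendApp from backend.go:

package main

import "log"

// Sketch only: this stub stands in for the real BackendApp defined in backend.go.
type BackendApp struct{}

// Reallocate mirrors the signature introduced by this commit:
// rebalanceVPSInstances controls whether VPS instances are spawned/deprecated,
// reshuffleTenants controls whether tenants are moved between instances.
func (app *BackendApp) Reallocate(rebalanceVPSInstances, reshuffleTenants bool) error {
	log.Printf("Reallocate(rebalanceVPSInstances=%t, reshuffleTenants=%t)", rebalanceVPSInstances, reshuffleTenants)
	return nil
}

func main() {
	app := &BackendApp{}

	// Full pass: the hourly scheduled task and the admin panel "reallocate" action.
	_ = app.Reallocate(true, true)

	// Lite pass: user-driven changes (InitializeTenant, subdomain updates)
	// re-apply allocations without touching the VPS fleet or moving tenants.
	_ = app.Reallocate(false, false)
}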

.gitignore (+3 -0)

@ -18,3 +18,6 @@ greenhouse-daemon/threshold.pid
greenhouse-daemon/threshold.log
greenhouse-daemon/caddy.log
greenhouse-daemon/daemon.log
greenhouse-daemon/old
greenhouse-daemon/old2
greenhouse-daemon/old3

backend.go (+274 -248)

@ -221,6 +221,11 @@ func (app *BackendApp) InitializeTenant(tenantId int, email string) error {
return errors.Wrapf(err, "InitializeTenant() for '%s':", email)
}
err = app.Reallocate(false, false)
if err != nil {
return errors.Wrapf(err, "InitializeTenant() for '%s':", email)
}
return nil
}
@ -549,10 +554,10 @@ func (app *BackendApp) ValidateExternalDomain(externalDomain, personalDomain str
googleIsValid := strings.HasSuffix(cnameFromGoogle, personalDomainWithPeriodAtTheEnd)
quad9IsValid := strings.HasSuffix(cnameFromQuad9, personalDomainWithPeriodAtTheEnd)
defaultIsValid := strings.HasSuffix(cnameFromDefaultResolver, personalDomainWithPeriodAtTheEnd)
log.Printf(
"ValidateExternalDomain(): %s --> %s,%s,%s\n(personalDomain=%s) (valid: %t,%t,%t)\n",
externalDomain, cnameFromGoogle, cnameFromQuad9, cnameFromDefaultResolver, personalDomain, googleIsValid, quad9IsValid, defaultIsValid,
)
// log.Printf(
// "ValidateExternalDomain(): %s --> %s,%s,%s\n(personalDomain=%s) (valid: %t,%t,%t)\n",
// externalDomain, cnameFromGoogle, cnameFromQuad9, cnameFromDefaultResolver, personalDomain, googleIsValid, quad9IsValid, defaultIsValid,
// )
if googleIsValid || quad9IsValid || defaultIsValid {
if updateDatabase {
err := app.Model.MarkExternalDomainAsVerified(externalDomain)
@ -569,18 +574,18 @@ func (app *BackendApp) ValidateExternalDomain(externalDomain, personalDomain str
return false, errors.New("ran out of attempts and niether succeeded nor failed, this should never happen :\\")
}
func (app *BackendApp) Rebalance() error {
log.Println("Starting Rebalance Process... ")
completed, err := app.tryRebalance()
func (app *BackendApp) Reallocate(rebalanceVPSInstances, reshuffleTenants bool) error {
log.Println("Starting Reallocate Process... ")
completed, err := app.tryReallocate(rebalanceVPSInstances, reshuffleTenants)
if !completed && err == nil {
log.Println("Rebalance not complete yet. Running backendApp.tryRebalance() again")
_, err := app.tryRebalance()
log.Println("Reallocate not complete yet. Running backendApp.tryReallocate() again")
_, err := app.tryReallocate(rebalanceVPSInstances, reshuffleTenants)
return err
}
return err
}
func (app *BackendApp) tryRebalance() (bool, error) {
func (app *BackendApp) tryReallocate(rebalanceVPSInstances bool, reshuffleTenants bool) (bool, error) {
billingYear, billingMonth, _, _, amountOfMonthElapsed := getBillingTimeInfo()
@ -597,25 +602,42 @@ func (app *BackendApp) tryRebalance() (bool, error) {
return false, err
}
validVpsInstances, onlyDBInstances, onlyCloudInstances, err := app.GetInstances()
if err != nil {
return false, err
}
var validVpsInstances map[string]*VPSInstance
errorStrings := []string{}
for _, v := range onlyDBInstances {
errorStrings = append(errorStrings, fmt.Sprintf("instance %s (%s) is in the database, but not in the provider api", v.GetId(), v.IPV4))
}
for _, v := range onlyCloudInstances {
errorStrings = append(errorStrings, fmt.Sprintf("instance %s (%s) is in the provider api, but not in the database", v.GetId(), v.IPV4))
}
if rebalanceVPSInstances {
var onlyDBInstances map[string]*VPSInstance
var onlyCloudInstances map[string]*VPSInstance
validVpsInstances, onlyDBInstances, onlyCloudInstances, err = app.GetInstances()
if err != nil {
return false, err
}
// issue warnings for inconsistencies between DB and provider
if len(errorStrings) > 0 {
return false, errors.Errorf("VPS instances are inconsistent: \n%s\n", strings.Join(errorStrings, "\n"))
errorStrings := []string{}
for _, v := range onlyDBInstances {
errorStrings = append(errorStrings, fmt.Sprintf("instance %s (%s) is in the database, but not in the provider api", v.GetId(), v.IPV4))
}
for _, v := range onlyCloudInstances {
errorStrings = append(errorStrings, fmt.Sprintf("instance %s (%s) is in the provider api, but not in the database", v.GetId(), v.IPV4))
}
// issue warnings for inconsistencies between DB and provider
if len(errorStrings) > 0 {
return false, errors.Errorf("VPS instances are inconsistent: \n%s\n", strings.Join(errorStrings, "\n"))
}
} else {
dbVPSInstances, err := app.Model.GetVPSInstances()
if err != nil {
return false, err
}
validVpsInstances = map[string]*VPSInstance{}
for k, v := range dbVPSInstances {
if !v.Deleted && !v.Deprecated {
validVpsInstances[k] = v
}
}
}
// aggregate dedicated vps instances count onto tenants
// filter out dedicated vps instances & aggregate dedicated vps instances count onto tenants
keysToRemove := []string{}
for k, vpsInstance := range validVpsInstances {
if vpsInstance.TenantId != 0 {
@ -693,74 +715,76 @@ func (app *BackendApp) tryRebalance() (bool, error) {
// -------------------------------------------------------------------------------
// STEP 1: scale the # of instances up or down depending on total projected usage.
totalUsageSoFar := int64(0)
totalUsageByActiveTenantsSoFar := int64(0)
for _, tenant := range tenants {
totalUsageSoFar += tenant.Bytes
if !tenant.Deactivated {
totalUsageByActiveTenantsSoFar += tenant.Bytes
if rebalanceVPSInstances {
totalUsageSoFar := int64(0)
totalUsageByActiveTenantsSoFar := int64(0)
for _, tenant := range tenants {
totalUsageSoFar += tenant.Bytes
if !tenant.Deactivated {
totalUsageByActiveTenantsSoFar += tenant.Bytes
}
}
}
soFarFloat := float64(totalUsageSoFar)
activeTenantsSoFarFloat := float64(totalUsageByActiveTenantsSoFar)
totalProjectedUsageFloat := soFarFloat + ((activeTenantsSoFarFloat / amountOfMonthElapsed) - activeTenantsSoFarFloat)
//totalProjectedUsage := int64(totalProjectedUsageFloat)
totalMonthlyAllotment := int64(0)
for _, vpsInstance := range validVpsInstances {
if !vpsInstance.Deprecated && !vpsInstance.Deleted {
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor := float64(1)
totalMonthlyAllotment += int64(float64(vpsInstance.BytesMonthly) * amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor)
}
}
totalMonthlyAllotmentFloat := float64(totalMonthlyAllotment)
soFarFloat := float64(totalUsageSoFar)
activeTenantsSoFarFloat := float64(totalUsageByActiveTenantsSoFar)
totalProjectedUsageFloat := soFarFloat + ((activeTenantsSoFarFloat / amountOfMonthElapsed) - activeTenantsSoFarFloat)
//totalProjectedUsage := int64(totalProjectedUsageFloat)
overageFloat := totalProjectedUsageFloat - totalMonthlyAllotmentFloat
overflowAmountFloat := overageFloat - float64(projectedOverageAllowedBeforeSpawningNewInstance)
if overflowAmountFloat > 0 || len(validVpsInstances) == 0 {
instancesToCreate := int(math.Ceil(overflowAmountFloat / float64(DEFAULT_INSTANCE_MONTHLY_BYTES)))
if instancesToCreate < 1 && len(validVpsInstances) == 0 {
instancesToCreate = 1
totalMonthlyAllotment := int64(0)
for _, vpsInstance := range validVpsInstances {
if !vpsInstance.Deprecated && !vpsInstance.Deleted {
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor := float64(1)
totalMonthlyAllotment += int64(float64(vpsInstance.BytesMonthly) * amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor)
}
}
log.Printf("spawning %d instances...", instancesToCreate)
totalMonthlyAllotmentFloat := float64(totalMonthlyAllotment)
overageFloat := totalProjectedUsageFloat - totalMonthlyAllotmentFloat
overflowAmountFloat := overageFloat - float64(projectedOverageAllowedBeforeSpawningNewInstance)
if overflowAmountFloat > 0 || len(validVpsInstances) == 0 {
instancesToCreate := int(math.Ceil(overflowAmountFloat / float64(DEFAULT_INSTANCE_MONTHLY_BYTES)))
if instancesToCreate < 1 && len(validVpsInstances) == 0 {
instancesToCreate = 1
}
log.Printf("spawning %d instances...", instancesToCreate)
tasks := []func() taskResult{}
for i := 0; i < instancesToCreate; i++ {
tasks = append(tasks, func() taskResult {
instance, err := app.SpawnNewMultitenantInstance()
return taskResult{
Err: err,
Result: instance,
Name: strconv.Itoa(i),
}
})
}
results := doInParallel(false, tasks...)
tasks := []func() taskResult{}
for i := 0; i < instancesToCreate; i++ {
tasks = append(tasks, func() taskResult {
instance, err := app.SpawnNewMultitenantInstance()
return taskResult{
Err: err,
Result: instance,
Name: strconv.Itoa(i),
errors := []string{}
for _, result := range results {
if result.Err != nil {
errors = append(errors, fmt.Sprintf("%+v", result.Err))
}
})
}
results := doInParallel(false, tasks...)
errors := []string{}
for _, result := range results {
if result.Err != nil {
errors = append(errors, fmt.Sprintf("%+v", result.Err))
}
}
if len(errors) > 0 {
return false, fmt.Errorf("SpawnNewMultitenantInstance failed: \n%s\n", strings.Join(errors, "\n"))
if len(errors) > 0 {
return false, fmt.Errorf("SpawnNewMultitenantInstance failed: \n%s\n", strings.Join(errors, "\n"))
}
return false, nil
}
return false, nil
}
underusageAmount := -overageFloat - float64(projectedUnderageAllowedBeforeTerminatingInstance)
if underusageAmount > 0 && len(validVpsInstances) > 1 {
instancesToDeprecate := int(math.Ceil(underusageAmount / float64(DEFAULT_INSTANCE_MONTHLY_BYTES)))
if instancesToDeprecate > len(validVpsInstances)-1 {
instancesToDeprecate = len(validVpsInstances) - 1
}
log.Printf("deprecating %d instances...", instancesToDeprecate)
underusageAmount := -overageFloat - float64(projectedUnderageAllowedBeforeTerminatingInstance)
if underusageAmount > 0 && len(validVpsInstances) > 1 {
instancesToDeprecate := int(math.Ceil(underusageAmount / float64(DEFAULT_INSTANCE_MONTHLY_BYTES)))
if instancesToDeprecate > len(validVpsInstances)-1 {
instancesToDeprecate = len(validVpsInstances) - 1
return false, nil
}
log.Printf("deprecating %d instances...", instancesToDeprecate)
return false, nil
}
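
To make the STEP 1 scaling arithmetic above concrete, here is a small self-contained sketch of the projection it performs: total usage so far, plus a rest-of-month extrapolation of the active tenants' usage, compared against the fleet's monthly allotment plus an overage allowance. The constant names and numbers here are illustrative stand-ins, not the values defined in backend.go.

package main

import (
	"fmt"
	"math"
)

// Illustrative stand-in; the real constant and thresholds live in backend.go.
const defaultInstanceMonthlyBytes = 2e12 // ~2 TB of transfer per instance per month

// projectedUsage extrapolates the rest of the month from active tenants only:
// deactivated tenants keep what they have already used, while active tenants
// are assumed to keep their current pace for the remainder of the month.
func projectedUsage(totalSoFar, activeSoFar, amountOfMonthElapsed float64) float64 {
	return totalSoFar + (activeSoFar/amountOfMonthElapsed - activeSoFar)
}

func main() {
	amountOfMonthElapsed := 0.25  // 25% of the billing month has passed
	totalSoFar := 3e12            // 3 TB used so far by all tenants
	activeSoFar := 2.5e12         // 2.5 TB of that by tenants that are still active
	totalMonthlyAllotment := 8e12 // fleet-wide monthly transfer allotment
	overageAllowance := 1e12      // allowed projected overage before spawning

	projected := projectedUsage(totalSoFar, activeSoFar, amountOfMonthElapsed)
	overflow := projected - totalMonthlyAllotment - overageAllowance
	fmt.Printf("projected: %.1f TB, overflow: %.1f TB\n", projected/1e12, overflow/1e12)

	if overflow > 0 {
		// Same rounding as the diff: one new instance per full allotment of overflow.
		fmt.Printf("would spawn %d instance(s)\n", int(math.Ceil(overflow/defaultInstanceMonthlyBytes)))
	}
}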
// STEP 2: shuffle tenants around to balance load.
@ -821,193 +845,195 @@ func (app *BackendApp) tryRebalance() (bool, error) {
}
}
// 2.2 shuffle tenant allocations around so tenants
// are moved from instances with predicted overages to instances with predicted underages
if reshuffleTenants {
// 2.2 shuffle tenant allocations around so tenants
// are moved from instances with predicted overages to instances with predicted underages
iterations := 0
doneShuffling := false
for !doneShuffling && iterations < shufflingCircuitBreakerLimit && len(validVpsInstances) > 1 {
highestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
lowestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
for _, instance := range validVpsInstances {
if !instance.Deprecated && !instance.Deleted {
//TODO
amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor := float64(1)
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
totalMonthlyAllotment := int64(float64(instance.BytesMonthly) * amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor)
surplus := totalMonthlyAllotment - getInstanceProjectedUsage(
instance,
&workingAllocations,
&tenants,
desiredInstancesPerTenant,
amountOfMonthElapsed,
)
if highestSurplus.instance == nil || highestSurplus.bytes < surplus {
highestSurplus = vpsInstanceBytesTuple{instance: instance, bytes: surplus}
}
if lowestSurplus.instance == nil || lowestSurplus.bytes > surplus {
lowestSurplus = vpsInstanceBytesTuple{instance: instance, bytes: surplus}
}
}
}
iterations := 0
doneShuffling := false
for !doneShuffling && iterations < shufflingCircuitBreakerLimit && len(validVpsInstances) > 1 {
highestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
lowestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
// if there are no instances which have a predicted overage, exit.
if lowestSurplus.bytes >= 0 {
doneShuffling = true
break
}
for _, instance := range validVpsInstances {
if !instance.Deprecated && !instance.Deleted {
//TODO
amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor := float64(1)
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
totalMonthlyAllotment := int64(float64(instance.BytesMonthly) * amountOfMonthThatThisInstanceWillHaveBeenTurnedOnFor)
surplus := totalMonthlyAllotment - getInstanceProjectedUsage(
instance,
&workingAllocations,
&tenants,
desiredInstancesPerTenant,
amountOfMonthElapsed,
)
// let's say that the most overused instance has a predicted overusage of 100gb
// and the most underused instance has a predicted underusage of 700gb
// the difference between those is 800gb.
// so we want to move 400gb (half the difference) of estimated future traffic
// from the most overused to the most underused.
// if we did that, then the result would be balanced: both predicted to be underused by 300gb
desiredDifference := int64Abs(lowestSurplus.bytes-highestSurplus.bytes) / 2
// only continue shuffling if the difference between the two instances
// is over 20% of their monthly allowance
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
averageMonthlyBytes := float64(highestSurplus.instance.BytesMonthly+lowestSurplus.instance.BytesMonthly) * float64(0.5)
if float64(desiredDifference) < averageMonthlyBytes*tenantShufflingThreshold {
doneShuffling = true
break
}
if highestSurplus.instance == nil || highestSurplus.bytes < surplus {
highestSurplus = vpsInstanceBytesTuple{instance: instance, bytes: surplus}
tenantsOfEmptiestInstance := workingAllocations[highestSurplus.instance.GetId()]
tenantsOfFullestInstance := workingAllocations[lowestSurplus.instance.GetId()]
// we are going to create a list of tenantId, instanceId pairs, one for each
// of the tenant allocations on each of the most underused and overused instances
// each list item also includes the net effect on the projected usage difference of the two instances
// assuming that the tenant was moved from one instance to the other
effectsOfMovingTenantToOtherInstance := []tenantMoveTuple{}
for tenantId := range tenantsOfEmptiestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfFullestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: highestSurplus.instance.GetId(),
bytes: int64(futureUsage * 2),
},
)
}
if lowestSurplus.instance == nil || lowestSurplus.bytes > surplus {
lowestSurplus = vpsInstanceBytesTuple{instance: instance, bytes: surplus}
}
for tenantId := range tenantsOfFullestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfEmptiestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: lowestSurplus.instance.GetId(),
bytes: int64(-futureUsage * 2),
},
)
}
}
}
// if there are no instances which have a predicted overage, exit.
if lowestSurplus.bytes >= 0 {
doneShuffling = true
break
}
// let's say that the most overused instance has a predicted overusage of 100gb
// and the most underused instance has a predicted underusage of 700gb
// the difference between those is 800gb.
// so we want to move 400gb (half the difference) of estimated future traffic
// from the most overused to the most underused.
// if we did that, then the result would be balanced: both predicted to be underused by 300gb
desiredDifference := int64Abs(lowestSurplus.bytes-highestSurplus.bytes) / 2
// only continue shuffling if the difference between the two instances
// is over 20% of their monthly allowance
// TODO handle BytesMonthly when node is created in the middle of the month / near end of month
averageMonthlyBytes := float64(highestSurplus.instance.BytesMonthly+lowestSurplus.instance.BytesMonthly) * float64(0.5)
if float64(desiredDifference) < averageMonthlyBytes*tenantShufflingThreshold {
doneShuffling = true
break
}
// we constructed a list of all possible moves we could make to shuffle tenants between the two,
// now we use a heuristic method to find the best combination of moves which
// gets us closest to our desiredDifference.
// (weighted to also minimize the # of moves, based on knapsackShufflingMinimizationFactor)
// This is basically the knapsack problem: https://en.wikipedia.org/wiki/Knapsack_problem
tenantsOfEmptiestInstance := workingAllocations[highestSurplus.instance.GetId()]
tenantsOfFullestInstance := workingAllocations[lowestSurplus.instance.GetId()]
// we are going to create a list of tenantId, instanceId pairs, one for each
// of the tenant allocations on each of the most underused and overused instances
// each list item also includes the net effect on the projected usage difference of the two instances
// assuming that the tenant was moved from one instance to the other
effectsOfMovingTenantToOtherInstance := []tenantMoveTuple{}
for tenantId := range tenantsOfEmptiestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfFullestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: highestSurplus.instance.GetId(),
bytes: int64(futureUsage * 2),
},
)
}
}
for tenantId := range tenantsOfFullestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfEmptiestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: lowestSurplus.instance.GetId(),
bytes: int64(-futureUsage * 2),
},
)
positiveValue := int64(0)
negativeValue := int64(0)
for _, move := range effectsOfMovingTenantToOtherInstance {
if move.bytes > 0 {
positiveValue += move.bytes
} else {
negativeValue += move.bytes
}
}
}
// we constructed a list of all possible moves we could make to shuffle tenants between the two,
// now we use a heuristic method to find the best combination of moves which
// gets us closest to our desiredDifference.
// (weighted to also minimize the # of moves, based on knapsackShufflingMinimizationFactor)
// This is basically the knapsack problem: https://en.wikipedia.org/wiki/Knapsack_problem
positiveValue := int64(0)
negativeValue := int64(0)
for _, move := range effectsOfMovingTenantToOtherInstance {
if move.bytes > 0 {
positiveValue += move.bytes
} else {
negativeValue += move.bytes
bestGuessSoFar := knapsackGuess{
moves: []*tenantMoveTuple{},
distance: desiredDifference,
score: getKnapsackSolutionScore(desiredDifference, desiredDifference, 0),
}
}
bestGuessSoFar := knapsackGuess{
moves: []*tenantMoveTuple{},
distance: desiredDifference,
score: getKnapsackSolutionScore(desiredDifference, desiredDifference, 0),
}
numberOfAttempts := int(float64(len(effectsOfMovingTenantToOtherInstance)) * knapsackNumberOfGuessesFactor)
for attempt := 0; attempt < numberOfAttempts; attempt++ {
difference := int64(0)
moves := []*tenantMoveTuple{}
positiveTotal := positiveValue
negativeTotal := negativeValue
permutation := getRandomPermutation(len(effectsOfMovingTenantToOtherInstance))
for i := range effectsOfMovingTenantToOtherInstance {
index := permutation[i]
move := effectsOfMovingTenantToOtherInstance[index]
proposedDifference := difference + move.bytes
proposedDistance := int64Abs(proposedDifference - desiredDifference)
proposedScore := getKnapsackSolutionScore(desiredDifference, proposedDistance, len(moves)+1)
// if moving this tenant would push our current guess "too far" in the positive direction
if proposedDifference > desiredDifference {
// ok, we overshot... would it be possible to "walk it back"?
// or did we overshoot only a little bit & create a good solution?
impossibleToGoBack := proposedDifference+negativeTotal > desiredDifference
if impossibleToGoBack && proposedScore > bestGuessSoFar.score {
continue
numberOfAttempts := int(float64(len(effectsOfMovingTenantToOtherInstance)) * knapsackNumberOfGuessesFactor)
for attempt := 0; attempt < numberOfAttempts; attempt++ {
difference := int64(0)
moves := []*tenantMoveTuple{}
positiveTotal := positiveValue
negativeTotal := negativeValue
permutation := getRandomPermutation(len(effectsOfMovingTenantToOtherInstance))
for i := range effectsOfMovingTenantToOtherInstance {
index := permutation[i]
move := effectsOfMovingTenantToOtherInstance[index]
proposedDifference := difference + move.bytes
proposedDistance := int64Abs(proposedDifference - desiredDifference)
proposedScore := getKnapsackSolutionScore(desiredDifference, proposedDistance, len(moves)+1)
// if moving this tenant would push our current guess "too far" in the positive direction
if proposedDifference > desiredDifference {
// ok, we overshot... would it be possible to "walk it back"?
// or did we overshoot only a little bit & create a good solution?
impossibleToGoBack := proposedDifference+negativeTotal > desiredDifference
if impossibleToGoBack && proposedScore > bestGuessSoFar.score {
continue
}
}
}
// if moving this tenant would push our current guess "too far" in the negative direction
if proposedDifference < 0 {
impossibleToGoBack := proposedDifference+positiveTotal < 0
if impossibleToGoBack {
continue
// if moving this tenant would push our current guess "too far" in the negative direction
if proposedDifference < 0 {
impossibleToGoBack := proposedDifference+positiveTotal < 0
if impossibleToGoBack {
continue
}
}
}
difference = proposedDifference
moves = append(moves, &move)
if move.bytes > 0 {
positiveTotal -= move.bytes
} else {
negativeTotal -= move.bytes
}
if proposedScore < bestGuessSoFar.score {
bestGuessSoFar = knapsackGuess{
moves: moves,
distance: proposedDistance,
score: proposedScore,
difference = proposedDifference
moves = append(moves, &move)
if move.bytes > 0 {
positiveTotal -= move.bytes
} else {
negativeTotal -= move.bytes
}
if proposedScore < bestGuessSoFar.score {
bestGuessSoFar = knapsackGuess{
moves: moves,
distance: proposedDistance,
score: proposedScore,
}
}
}
}
}
if len(bestGuessSoFar.moves) == 0 {
doneShuffling = true
} else {
for _, move := range bestGuessSoFar.moves {
if move.instanceId == highestSurplus.instance.GetId() {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[lowestSurplus.instance.GetId()][move.tenantId] = true
} else {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[highestSurplus.instance.GetId()][move.tenantId] = true
if len(bestGuessSoFar.moves) == 0 {
doneShuffling = true
} else {
for _, move := range bestGuessSoFar.moves {
if move.instanceId == highestSurplus.instance.GetId() {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[lowestSurplus.instance.GetId()][move.tenantId] = true
} else {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[highestSurplus.instance.GetId()][move.tenantId] = true
}
}
}
}
}
if iterations == shufflingCircuitBreakerLimit {
return false, fmt.Errorf(
`something went wrong shuffling tenants: shufflingCircuitBreakerLimit was reached (%d iterations)`,
shufflingCircuitBreakerLimit,
)
if iterations == shufflingCircuitBreakerLimit {
return false, fmt.Errorf(
`something went wrong shuffling tenants: shufflingCircuitBreakerLimit was reached (%d iterations)`,
shufflingCircuitBreakerLimit,
)
}
}
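
The comments above frame tenant shuffling as a knapsack-style search: choose a subset of candidate moves whose combined byte effect lands as close as possible to half the surplus gap between the fullest and emptiest instances, while preferring fewer moves. Below is a simplified sketch of that randomized-permutation heuristic; the types, the scoring function, and the greedy acceptance rule are stand-ins for tenantMoveTuple, getKnapsackSolutionScore, and the overshoot/walk-back logic in backend.go.

package main

import (
	"fmt"
	"math/rand"
)

// move is a simplified stand-in for tenantMoveTuple: the projected byte effect
// of relocating one tenant between the fullest and emptiest instances.
type move struct {
	tenantID int
	bytes    int64
}

func abs64(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
}

// score is a stand-in for getKnapsackSolutionScore: lower is better, and each
// extra move adds a penalty so the search prefers fewer relocations.
func score(distance int64, moveCount int) float64 {
	return float64(distance) + float64(moveCount)*1e9
}

// bestMoves randomly permutes the candidate moves several times and greedily
// keeps any move that brings the running total closer to the target difference.
func bestMoves(candidates []move, target int64, attempts int) []move {
	best := []move{}
	bestScore := score(target, 0) // baseline: do nothing, stay the full distance away
	for a := 0; a < attempts; a++ {
		total := int64(0)
		picked := []move{}
		for _, i := range rand.Perm(len(candidates)) {
			proposed := total + candidates[i].bytes
			if abs64(proposed-target) < abs64(total-target) {
				total = proposed
				picked = append(picked, candidates[i])
			}
		}
		if s := score(abs64(total-target), len(picked)); s < bestScore {
			bestScore = s
			best = picked
		}
	}
	return best
}

func main() {
	// Target: move ~400 GB of projected traffic (half of an 800 GB surplus gap).
	target := int64(400e9)
	candidates := []move{
		{tenantID: 1, bytes: 250e9},
		{tenantID: 2, bytes: 120e9},
		{tenantID: 3, bytes: -60e9}, // moving this tenant the other way
		{tenantID: 4, bytes: 90e9},
	}
	for _, m := range bestMoves(candidates, target, 20) {
		fmt.Printf("move tenant %d (%d bytes)\n", m.tenantID, m.bytes)
	}
}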
// Step 3: Now that we have the workingAllocations shuffled to balance the load, we need to apply
@ -1145,7 +1171,7 @@ func (app *BackendApp) tryRebalance() (bool, error) {
return true, err
}
log.Println("rebalance complete!")
log.Println("reallocate complete!")
return true, nil
}


frontend/admin.gotemplate.html (+2 -2)

@ -11,8 +11,8 @@
<li>
<form method="POST" action="#">
<input type="hidden" name="hashOfSessionId" value="{{ .HashOfSessionId }}"/>
<input type="hidden" name="action" value="rebalance"/>
<input type="submit" value="rebalance"/>
<input type="hidden" name="action" value="reallocate"/>
<input type="submit" value="reallocate"/>
</form>
</li>
<li>


frontend_admin_panel.go (+4 -4)

@ -63,14 +63,14 @@ func registerAdminPanelRoutes(app *FrontendApp) {
return
}
}
} else if action == "rebalance" {
} else if action == "reallocate" {
app.setFlash(responseWriter, session, "info", "rebalance has been kicked off in the background\n")
app.setFlash(responseWriter, session, "info", "reallocate has been kicked off in the background\n")
go (func() {
err := app.Backend.Rebalance()
err := app.Backend.Reallocate(true, true)
if err != nil {
log.Printf("\nrebalance failed! %+v\n\n", err)
log.Printf("\nreallocate failed! %+v\n\n", err)
}
})()


frontend_profile.go (+8 -1)

@ -84,7 +84,14 @@ func registerProfileRoutes(app *FrontendApp) {
return
}
// TODO trigger "rebalance" to re-setup everything
err = app.Backend.Reallocate(false, false)
if err != nil {
errorMessage := "unable to update your subdomain: internal server error"
log.Printf("%s: %+v", errorMessage, err)
app.setFlash(responseWriter, session, "error", errorMessage)
http.Redirect(responseWriter, request, "/profile", http.StatusFound)
return
}
successMessage := fmt.Sprintf("Success! Your personal subdomain is now '%s.%s'\n", postedFreeSubdomain, freeSubdomainDomain)
app.setFlash(responseWriter, session, "info", successMessage)


scheduled_tasks.go (+9 -7)

@ -30,10 +30,17 @@ func NewScheduledTasks(ingress *IngressService, backendApp *BackendApp, dbModel
func (tasks *ScheduledTasks) Initialize() error {
var err error
// Need to update the external domain validation first so that the subsequent reallocate will
// send the correct authorized domains to all of the threshold server instances
err = tasks.Register("validate-external-domains", DomainVerificationPollingInterval, func() error { return tasks.Backend.ValidateExternalDomains() })
if err != nil {
return errors.Wrap(err, "could not register validate-external-domains task: ")
}
// TODO this needs to run only if it hasn't run recently.. ?
err = tasks.Register("rebalance", time.Hour, func() error { return tasks.Backend.Rebalance() })
err = tasks.Register("reallocate", time.Hour, func() error { return tasks.Backend.Reallocate(true, true) })
if err != nil {
return errors.Wrap(err, "could not register rebalance task: ")
return errors.Wrap(err, "could not register reallocate task: ")
}
err = tasks.Ingress.StartGreenhouseDaemon()
@ -92,11 +99,6 @@ func (tasks *ScheduledTasks) Initialize() error {
return errors.New("greenhouse-daemon never finished applying its config. Timed out after no activity for 10 seconds.")
}
err = tasks.Register("validate-external-domains", DomainVerificationPollingInterval, func() error { return tasks.Backend.ValidateExternalDomains() })
if err != nil {
return errors.Wrap(err, "could not register validate-external-domains task: ")
}
log.Println("🌱🏠 greenhouse has initialized successfully!")
go (func() {

