
fleshing out tenant allocation balancing process

master · forest, 8 months ago · commit 01f93c93bb
3 changed files with 514 additions and 91 deletions:
  1. backend.go  +354 -82
  2. db_model.go  +159 -8
  3. schema_versions/02_up_create_tenants_etc.sql  +1 -1

backend.go  +354 -82

@ -1,6 +1,7 @@
package main
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
@ -8,10 +9,11 @@ import (
"io/ioutil"
"log"
"math"
"math/rand"
"net/http"
"time"
"github.com/pkg/errors"
errors "git.sequentialread.com/forest/pkg-errors"
)
type BackendApp struct {
@ -23,13 +25,8 @@ type BackendApp struct {
ThresholdManagementTlsClientConfig *tls.Config
}
type TenantSettings struct {
ReservedPorts []int
AuthorizedDomains []string
}
type VPSInstanceWithTenants struct {
TenantSettings map[string]TenantSettings
TenantSettings map[string]TunnelSettings
VPSInstance *VPSInstance
}
@ -41,7 +38,27 @@ type vpsInstanceBytesTuple struct {
instance *VPSInstance
bytes int64
}
type tenantMoveTuple struct {
tenantId int
instanceId string
bytes int64
}
type knapsackGuess struct {
moves []*tenantMoveTuple
distance int64
score int64
}
type taskResult struct {
Name string
Err error
Result interface{}
}
const tenantShufflingThreshold = float64(0.2)
const knapsackShufflingMinimizationFactor = float64(0.3)
const knapsackNumberOfGuessesFactor = float64(5)
const shufflingCircuitBreakerLimit = 1000
const GIGABYTE = int64(1000000000)
const TERABYTE = int64(1000000000000)
@ -136,8 +153,9 @@ func (app *BackendApp) monitorAndRebalance() (bool, error) {
// note that the total bandwidth on the tenants may be larger
// than the total bandwidth on the vps instances because some vps instances may have been deleted
tenantAllocationCount := map[int]int{}
shadowConfigs := map[string]map[int]*TenantSettings{}
shadowConfigs := map[string]map[int]*TunnelSettings{}
workingAllocations := map[string]map[int]bool{}
originalAllocations := map[string]map[int]bool{}
pinned := map[string]map[int]bool{}
for _, row := range tenantVpsInstanceRows {
vpsInstanceId := row.GetVPSInstanceId()
@ -152,8 +170,9 @@ func (app *BackendApp) monitorAndRebalance() (bool, error) {
if hasTenant && hasVPSInstance {
_, has := shadowConfigs[vpsInstanceId]
if !has {
shadowConfigs[vpsInstanceId] = map[int]*TenantSettings{}
shadowConfigs[vpsInstanceId] = map[int]*TunnelSettings{}
workingAllocations[vpsInstanceId] = map[int]bool{}
originalAllocations[vpsInstanceId] = map[int]bool{}
}
shadowConfigs[vpsInstanceId][tenant.Id] = row.ShadowConfig
@ -164,6 +183,7 @@ func (app *BackendApp) monitorAndRebalance() (bool, error) {
if row.Active {
workingAllocations[vpsInstanceId][tenant.Id] = true
originalAllocations[vpsInstanceId][tenant.Id] = true
tenantAllocationCount[row.TenantId]++
}
}
@ -306,9 +326,9 @@ func (app *BackendApp) monitorAndRebalance() (bool, error) {
iterations := 0
doneShuffling := false
for !doneShuffling && iterations < 1000 && len(validVpsInstances) > 1 {
highestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: -1}
lowestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: -1}
for !doneShuffling && iterations < shufflingCircuitBreakerLimit && len(validVpsInstances) > 1 {
highestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
lowestSurplus := vpsInstanceBytesTuple{instance: nil, bytes: 0}
for _, instance := range validVpsInstances {
if !instance.Deprecated && !instance.Deleted {
@ -326,65 +346,256 @@ func (app *BackendApp) monitorAndRebalance() (bool, error) {
}
}
// swap some tenant allocation from lowestSurplus to highestSurplus.
}
// The tenant_vps_instance_pin rows act as a sort of "shadow config" for the tenants' membership on vps instances.
// when we schedule tenants onto instances for any reason, we modify the "next config".
// Then when we are ready to apply the "next config", we do a diff against the current "shadow config"
// and make requests to each instance which has diffs.
// we have to remember whether a given vps instance (ip address) was associated with a DNS entry
// for a given tenant, because we need to respect stale DNS caches
// actions := make([]func() taskResult, len(validVpsInstances))
// vpsInstancesByUrl := map[string]*VPSInstance{}
// for i, instance := range validVpsInstances {
// url := fmt.Sprintf("https://%s:%d/tenants", instance.IPV4, app.ThresholdManagementPort)
// actions[i] = func() taskResult {
// result, err := app.getTenantSettingsFromVpsInstance(url)
// return taskResult{
// Name: url,
// Err: err,
// Result: result,
// }
// }
// }
// results := doInParallel(actions...)
// for _, result := range results {
// if result.Err != nil {
// return result.Err
// }
// }
// existingTenantIds := map[string]bool{}
// existingVPSWithTenants := []VPSInstanceWithTenants{}
// for url, result := range results {
// tenants := result.Result.(map[string]TenantSettings)
// for tenantId := range tenants {
// existingTenantIds[tenantId] = true
// }
// existingVPSWithTenants = append(existingVPSWithTenants, VPSInstanceWithTenants{
// VPSInstance: vpsInstancesByUrl[url],
// TenantSettings: tenants,
// })
// }
// tenantsToAdd := []TenantInfo{}
// for _, tenant := range tenants {
// if _, has := existingTenantIds[strconv.Itoa(tenant.Id)]; !has {
// tenantsToAdd = append(tenantsToAdd, tenant)
// }
// }
// do rebalance
// if there are no instances which have a predicted overage, exit.
if lowestSurplus.bytes >= 0 {
doneShuffling = true
break
}
// lets say that the most overused instance has a predicted overusage of 100gb
// and the most underused instance has a predicted underusage of 700gb
// the difference between those is 800gb.
// so we want to move 400gb (half the difference) of estimated future traffic
// from the most overused to the most underused.
// if we did that, then the result would be balanced: both predicted to be underused by 300gb
desiredDifference := int64Abs(lowestSurplus.bytes-highestSurplus.bytes) / 2
// only continue shuffling if the difference between the two instances
// is over 20% of their monthly allowance
averageMonthlyBytes := float64(highestSurplus.instance.BytesMonthly+lowestSurplus.instance.BytesMonthly) * float64(0.5)
if float64(desiredDifference) < averageMonthlyBytes*tenantShufflingThreshold {
doneShuffling = true
break
}
tenantsOfEmptiestInstance := workingAllocations[highestSurplus.instance.GetId()]
tenantsOfFullestInstance := workingAllocations[lowestSurplus.instance.GetId()]
// we are going to create a list of tenantId, instanceId pairs, one for each
// of the tenant allocations on each of the most underused and overused instances
// each list item also includes the net effect on the projected usage difference of the two instances
// assuming that the tenant was moved from one instance to the other
effectsOfMovingTenantToOtherInstance := []tenantMoveTuple{}
for tenantId := range tenantsOfEmptiestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfFullestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: highestSurplus.instance.GetId(),
bytes: int64(futureUsage * 2),
},
)
}
}
for tenantId := range tenantsOfFullestInstance {
tenantUsageShare := float64(tenants[tenantId].Bytes / int64(desiredInstancesPerTenant))
futureUsage := (tenantUsageShare / amountOfMonthElapsed) - tenantUsageShare
// only consider moving the tenant if it does not already exist on the destination
if !tenantsOfEmptiestInstance[tenantId] {
effectsOfMovingTenantToOtherInstance = append(
effectsOfMovingTenantToOtherInstance,
tenantMoveTuple{
tenantId: tenantId,
instanceId: lowestSurplus.instance.GetId(),
bytes: int64(-futureUsage * 2),
},
)
}
}
// we constructed a list of all possible moves we could make to shuffle tenants between the two,
// now we use a heuristic method to find the best combination of moves which
// gets us closest to our desiredDifference.
// (weighted to also minimize the # of moves, based on knapsackShufflingMinimizationFactor)
// This is basically the knapsack problem: https://en.wikipedia.org/wiki/Knapsack_problem
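// For example (illustrative numbers, using the constants above): if desiredDifference is 400GB
// and the candidate moves are +250GB, +180GB, +90GB and -30GB, then {+250GB, +180GB, -30GB}
// sums to exactly 400GB, but the scoring below still prefers {+250GB, +90GB} (340GB, i.e. 60GB short)
// because it uses one fewer move: 60GB + 2*120GB beats 0GB + 3*120GB.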
positiveValue := int64(0)
negativeValue := int64(0)
for _, move := range effectsOfMovingTenantToOtherInstance {
if move.bytes > 0 {
positiveValue += move.bytes
} else {
negativeValue += move.bytes
}
}
bestGuessSoFar := knapsackGuess{
moves: []*tenantMoveTuple{},
distance: desiredDifference,
score: getKnapsackSolutionScore(desiredDifference, desiredDifference, 0),
}
numberOfAttempts := int(float64(len(effectsOfMovingTenantToOtherInstance)) * knapsackNumberOfGuessesFactor)
for attempt := 0; attempt < numberOfAttempts; attempt++ {
difference := int64(0)
moves := []*tenantMoveTuple{}
positiveTotal := positiveValue
negativeTotal := negativeValue
permutation := getRandomPermutation(len(effectsOfMovingTenantToOtherInstance))
for i := range effectsOfMovingTenantToOtherInstance {
index := permutation[i]
move := effectsOfMovingTenantToOtherInstance[index]
proposedDifference := difference + move.bytes
proposedDistance := int64Abs(proposedDifference - desiredDifference)
proposedScore := getKnapsackSolutionScore(desiredDifference, proposedDistance, len(moves)+1)
// if moving this tenant would push our current guess "too far" in the positive direction
if proposedDifference > desiredDifference {
// ok, we overshot... would it be possible to "walk it back"?
// or did we overshoot only a little bit & create a good solution?
impossibleToGoBack := proposedDifference+negativeTotal > desiredDifference
if impossibleToGoBack && proposedScore > bestGuessSoFar.score {
continue
}
}
// if moving this tenant would push our current guess "too far" in the negative direction
if proposedDifference < 0 {
impossibleToGoBack := proposedDifference+positiveTotal < 0
if impossibleToGoBack {
continue
}
}
difference = proposedDifference
moves = append(moves, &move)
if move.bytes > 0 {
positiveTotal -= move.bytes
} else {
negativeTotal -= move.bytes
}
if proposedScore < bestGuessSoFar.score {
bestGuessSoFar = knapsackGuess{
moves: moves,
distance: proposedDistance,
score: proposedScore,
}
}
}
}
if len(bestGuessSoFar.moves) == 0 {
doneShuffling = true
} else {
for _, move := range bestGuessSoFar.moves {
if move.instanceId == highestSurplus.instance.GetId() {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[lowestSurplus.instance.GetId()][move.tenantId] = true
} else {
delete(workingAllocations[move.instanceId], move.tenantId)
workingAllocations[highestSurplus.instance.GetId()][move.tenantId] = true
}
}
}
}
if iterations == shufflingCircuitBreakerLimit {
return false, fmt.Errorf(
`something went wrong shuffling tenants: shufflingCircuitBreakerLimit was reached (%d iterations)`,
shufflingCircuitBreakerLimit,
)
}
// Step 3: Now that we have the workingAllocations shuffled to balance the load, we need to apply
// those changes to the instances.
// we store a "shadow config" in the database for the tenants' membership on vps instances.
// Then when we are ready to apply a new configuration, we do a diff against the current "shadow config"
// and only make requests to each instance which has diffs.
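// For example (illustrative): if the shadow config for instance A says tenant 7 is authorized
// for {a.example.com} and the new config says {a.example.com, b.example.com}, then instance A
// is marked as changed and receives a PUT containing its full new tenant list.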
existingConfig := map[string]map[int]*TunnelSettings{}
newConfig := map[string]map[int]*TunnelSettings{}
for instanceId, instanceAllocations := range originalAllocations {
existingConfig[instanceId] = map[int]*TunnelSettings{} // initialize the per-instance map before filling it
for tenantId := range instanceAllocations {
settings, has := shadowConfigs[instanceId][tenantId]
if !has {
settings = &TunnelSettings{
AuthorizedDomains: []string{},
ReservedPorts: []PortRange{},
}
}
existingConfig[instanceId][tenantId] = settings
}
}
for instanceId, instanceAllocations := range workingAllocations {
newConfig[instanceId] = map[int]*TunnelSettings{} // initialize the per-instance map before filling it
for tenantId := range instanceAllocations {
newConfig[instanceId][tenantId] = tenants[tenantId].TunnelSettings
}
}
// go over the new and existing configs and mark any instanceIds which have differences
changedInstanceIds := map[string]bool{}
for instanceId, instanceTenants := range existingConfig {
if _, has := newConfig[instanceId]; !has {
changedInstanceIds[instanceId] = true
continue
}
newInstanceTenants := newConfig[instanceId]
for tenantId, tenantConfig := range instanceTenants {
newTenantConfig, has := newInstanceTenants[tenantId]
if !has || !newTenantConfig.DeepEquals(tenantConfig) {
changedInstanceIds[instanceId] = true
}
}
}
for instanceId, instanceTenants := range newConfig {
if _, has := existingConfig[instanceId]; !has {
changedInstanceIds[instanceId] = true
continue
}
existingInstanceTenants := existingConfig[instanceId]
for tenantId, tenantConfig := range instanceTenants {
existingTenantConfig, has := existingInstanceTenants[tenantId]
if !has || !existingTenantConfig.DeepEquals(tenantConfig) {
changedInstanceIds[instanceId] = true
}
}
}
// we send PUT requests to all the instances that have config changes
actions := make([]func() taskResult, len(changedInstanceIds))
i := 0
for instanceId := range changedInstanceIds {
instanceId := instanceId // capture the loop variable so each action closure gets its own copy
instance := validVpsInstances[instanceId]
actions[i] = func() taskResult {
err := app.saveVpsInstanceTenantSettings(billingYear, billingMonth, instance, newConfig[instanceId])
return taskResult{
Name: instanceId,
Err: err,
Result: nil,
}
}
i++
}
results := doInParallel(actions...)
for _, result := range results {
if result.Err != nil {
return true, result.Err
}
}
return true, nil
}
func (app *BackendApp) getTenantSettingsFromVpsInstance(url string) (map[string]TenantSettings, error) {
func (app *BackendApp) saveVpsInstanceTenantSettings(
billingYear int,
billingMonth int,
instance *VPSInstance,
config map[int]*TunnelSettings,
) error {
httpClient := http.Client{
Transport: &http.Transport{
@ -392,26 +603,63 @@ func (app *BackendApp) getTenantSettingsFromVpsInstance(url string) (map[string]
},
Timeout: 10 * time.Second,
}
request, err := http.NewRequest("GET", url, nil)
jsonBytes, err := json.Marshal(config)
url := fmt.Sprintf("https://%s:%d/tenants", instance.IPV4, app.ThresholdManagementPort)
request, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonBytes))
request.Header.Set("Content-Type", "application/json")
response, err := httpClient.Do(request)
if err != nil {
return nil, err
return err
}
if response.StatusCode != 200 {
return nil, fmt.Errorf("HTTP %d when calling %s (threshold multitenant management API)", response.StatusCode, url)
}
bytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, errors.Wrapf(err, "HTTP read error when calling %s (threshold multitenant management API)", url)
responseBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return errors.Wrapf(err, "HTTP read error when calling %s (threshold multitenant management API)", url)
}
return fmt.Errorf(
"HTTP %d when calling %s (threshold multitenant management API). response: %s",
response.StatusCode, url, string(responseBytes),
)
}
var responseObject map[string]TenantSettings
err = json.Unmarshal(bytes, &responseObject)
err = app.Model.SaveInstanceConfiguration(billingYear, billingMonth, instance, config)
if err != nil {
return nil, errors.Wrapf(err, "JSON parse error when calling %s (threshold multitenant management API)", url)
return errors.Wrapf(err, "Can't save instance configuration to database for %s", instance.GetId())
}
return responseObject, nil
return nil
}
// func (app *BackendApp) getTenantSettingsFromVpsInstance(url string) (map[string]TunnelSettings, error) {
// httpClient := http.Client{
// Transport: &http.Transport{
// TLSClientConfig: app.ThresholdManagementTlsClientConfig,
// },
// Timeout: 10 * time.Second,
// }
// request, err := http.NewRequest("GET", url, nil)
// response, err := httpClient.Do(request)
// if err != nil {
// return nil, err
// }
// if response.StatusCode != 200 {
// return nil, fmt.Errorf("HTTP %d when calling %s (threshold multitenant management API)", response.StatusCode, url)
// }
// bytes, err := ioutil.ReadAll(response.Body)
// if err != nil {
// return nil, errors.Wrapf(err, "HTTP read error when calling %s (threshold multitenant management API)", url)
// }
// var responseObject map[string]TunnelSettings
// err = json.Unmarshal(bytes, &responseObject)
// if err != nil {
// return nil, errors.Wrapf(err, "JSON parse error when calling %s (threshold multitenant management API)", url)
// }
// return responseObject, nil
// }
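// doInParallel runs the given actions concurrently and returns their taskResults keyed by taskResult.Name.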
func doInParallel(actions ...func() taskResult) map[string]taskResult {
resultsChannel := make(chan taskResult)
@ -440,8 +688,32 @@ func doInParallel(actions ...func() taskResult) map[string]taskResult {
return results
}
type taskResult struct {
Name string
Err error
Result interface{}
func int64Abs(x int64) int64 {
if x > 0 {
return x
} else {
return -x
}
}
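// getRandomPermutation returns the integers 0..length-1 in a uniformly random order,
// by repeatedly picking a random element from a shrinking candidate list
// (a Fisher-Yates style selection).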
func getRandomPermutation(length int) []int {
toReturn := make([]int, length)
toAppend := make([]int, length)
for i := 0; i < length; i++ {
toAppend[i] = i
}
for i := 0; i < length; i++ {
picked := rand.Intn(length - i)
toReturn[i] = toAppend[picked]
// move the last remaining candidate into the picked slot, then shrink the candidate list
toAppend[picked] = toAppend[length-i-1]
toAppend = toAppend[0 : length-i-1]
}
return toReturn
}
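// getKnapsackSolutionScore scores a candidate set of moves: the remaining distance from the
// desired difference, plus a per-move penalty proportional to the problem scale.
// For example (illustrative numbers): with scale = 400GB and knapsackShufflingMinimizationFactor = 0.3,
// each additional move costs 120GB of "distance", so a two-move solution has to land at least
// 120GB closer to the target than a one-move solution in order to win.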
func getKnapsackSolutionScore(scale int64, proposedDistance int64, proposedLength int) int64 {
return proposedDistance + int64(float64(scale)*knapsackShufflingMinimizationFactor)*int64(proposedLength)
}
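For reference, the randomized selection loop above boils down to the following standalone sketch, with the instance bookkeeping stripped out. The pickMoves and score names, the sample byte values, and the simplified overshoot handling (a plain skip instead of the walk-it-back check above) are illustrative assumptions, not part of this commit.

package main

import (
	"fmt"
	"math/rand"
)

const minimizationFactor = 0.3 // analogous to knapsackShufflingMinimizationFactor
const guessesFactor = 5        // analogous to knapsackNumberOfGuessesFactor

// score: distance from the target, plus a per-move penalty scaled by the target.
func score(target, distance int64, moves int) int64 {
	return distance + int64(float64(target)*minimizationFactor)*int64(moves)
}

func abs(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
}

// pickMoves tries random orderings of the candidate deltas and greedily keeps the ones
// that bring the running total closer to target, skipping any move that would overshoot.
func pickMoves(candidates []int64, target int64) []int64 {
	best := []int64{}
	bestScore := score(target, target, 0) // the "do nothing" baseline
	attempts := int(float64(len(candidates)) * guessesFactor)
	for a := 0; a < attempts; a++ {
		total := int64(0)
		picked := []int64{}
		for _, i := range rand.Perm(len(candidates)) {
			proposed := total + candidates[i]
			if proposed > target || proposed < 0 {
				continue // would push the running total past the target (or negative)
			}
			total = proposed
			picked = append(picked, candidates[i])
			if s := score(target, abs(target-total), len(picked)); s < bestScore {
				bestScore = s
				best = append([]int64{}, picked...) // copy so later appends don't alias
			}
		}
	}
	return best
}

func main() {
	// Illustrative numbers: we want to shift ~400 GB of projected traffic.
	gb := int64(1000000000)
	candidates := []int64{250 * gb, 180 * gb, 90 * gb, -30 * gb, 60 * gb}
	fmt.Println(pickMoves(candidates, 400*gb))
}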

db_model.go  +159 -8

@ -7,6 +7,8 @@ import (
"io/ioutil"
"log"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
@ -22,12 +24,33 @@ type DBModel struct {
DB *sql.DB
}
type PortRange struct {
Start int
End int
}
type TunnelSettings struct {
ReservedPorts []PortRange
AuthorizedDomains []string
}
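// DeepEquals reports whether two TunnelSettings describe the same set of authorized domains
// and reserved port ranges, ignoring ordering: both sides are reduced to sorted,
// comma-joined strings and compared.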
func (settings *TunnelSettings) DeepEquals(other *TunnelSettings) bool {
if domainNamesToString(settings.AuthorizedDomains) != domainNamesToString(other.AuthorizedDomains) {
return false
}
if portRangesToString(settings.ReservedPorts) != portRangesToString(other.ReservedPorts) {
return false
}
return true
}
type TenantInfo struct {
Id int
Created time.Time
DedicatedVPSCount int
Bytes int64
ServiceLimitCents int
TunnelSettings *TunnelSettings
Deactivated bool
}
@ -41,6 +64,8 @@ type TenantVPSInstance struct {
DeactivatedAt *time.Time
}
const DomainVerificationPollingInterval = time.Hour
func (i *TenantVPSInstance) GetVPSInstanceId() string {
return fmt.Sprintf("%s_%s", i.ServiceProvider, i.ProviderInstanceId)
}
@ -306,13 +331,56 @@ func (model *DBModel) GetVPSInstances() (map[string]*VPSInstance, error) {
}
func (model *DBModel) GetTenants() (map[int]*TenantInfo, error) {
rows, err := model.DB.Query(`
SELECT
tenants.id,
tenants.created,
tenants.service_limit_cents
FROM tenants
`)
rows, err := model.DB.Query(`SELECT tenant_id, port_start, port_end FROM reserved_ports`)
if err != nil {
return nil, err
}
reservedPorts := map[int][]PortRange{}
for rows.Next() {
var tenantId int
var portStart int
var portEnd int
err := rows.Scan(&tenantId, &portStart, &portEnd)
if err != nil {
return nil, err
}
portRange := PortRange{Start: portStart, End: portEnd}
if _, has := reservedPorts[tenantId]; !has {
reservedPorts[tenantId] = []PortRange{portRange}
} else {
reservedPorts[tenantId] = append(reservedPorts[tenantId], portRange)
}
}
rows, err = model.DB.Query(`SELECT tenant_id, domain_name, last_verified FROM external_domains`)
if err != nil {
return nil, err
}
verificationCutoff := time.Now().Add(-(DomainVerificationPollingInterval + time.Minute))
authorizedDomains := map[int][]string{}
for rows.Next() {
var tenantId int
var domainName string
var lastVerified time.Time
err := rows.Scan(&tenantId, &domainName, &lastVerified)
if err != nil {
return nil, err
}
if lastVerified.After(verificationCutoff) {
if _, has := authorizedDomains[tenantId]; !has {
authorizedDomains[tenantId] = []string{domainName}
} else {
authorizedDomains[tenantId] = append(authorizedDomains[tenantId], domainName)
}
}
}
rows, err = model.DB.Query(`SELECT id, created, subdomain, service_limit_cents FROM tenants`)
if err != nil {
return nil, err
@ -322,17 +390,34 @@ func (model *DBModel) GetTenants() (map[int]*TenantInfo, error) {
for rows.Next() {
var tenantId int
var tenantCreated time.Time
var subdomain string
var serviceLimitCents int
err := rows.Scan(&tenantId, &tenantCreated, &serviceLimitCents)
err := rows.Scan(&tenantId, &tenantCreated, &subdomain, &serviceLimitCents)
if err != nil {
return nil, err
}
thisTenantDomains := authorizedDomains[tenantId]
if thisTenantDomains == nil {
thisTenantDomains = []string{}
}
thisTenantPorts := reservedPorts[tenantId]
if thisTenantPorts == nil {
thisTenantPorts = []PortRange{}
}
toReturn[tenantId] = &TenantInfo{
Id: tenantId,
Created: tenantCreated,
ServiceLimitCents: serviceLimitCents,
TunnelSettings: &TunnelSettings{
AuthorizedDomains: append(thisTenantDomains, subdomain),
ReservedPorts: thisTenantPorts,
},
}
}
return toReturn, nil
@ -394,6 +479,58 @@ func (model *DBModel) GetTenantVPSInstanceRows(billingYear, billingMonth int) ([
return toReturn, nil
}
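// SaveInstanceConfiguration upserts one tenant_vps_instance row per tenant in config
// (storing the serialized TunnelSettings as shadow_config and marking it active),
// then deactivates any previously-active tenants on this instance that are not in config.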
func (model *DBModel) SaveInstanceConfiguration(
billingYear int,
billingMonth int,
instance *VPSInstance,
config map[int]*TunnelSettings,
) error {
// first we set shadow_config & active=true for all tenants mentioned in the config
tenantIds := []int{}
for tenantId, tunnelSettings := range config {
tenantIds = append(tenantIds, tenantId)
shadowConfigBytes, err := json.Marshal(tunnelSettings)
shadowConfig := string(shadowConfigBytes)
if err != nil {
return errors.Wrapf(err, "cant serialize shadow config for tenant %s on %s", tenantId, instance.GetId())
}
model.DB.Exec(`
INSERT INTO tenant_vps_instance (
billing_year, billing_month, tenant_id, service_provider, provider_instance_id,
shadow_config, active
)
VALUES($1, $2, $3, $4, $5,
$6, $7)
ON CONFLICT ON CONSTRAINT primary_key
DO
UPDATE SET shadow_config = $6, active = $7;
`,
billingYear, billingMonth, tenantId, instance.ServiceProvider, instance.ProviderInstanceId,
shadowConfig, true,
)
}
// next, we disable all existing tenants for this instance which are not mentioned in the config
tenantIdsStrings := make([]string, len(tenantIds))
for i, id := range tenantIds {
tenantIdsStrings[i] = strconv.Itoa(id)
}
model.DB.Exec(
fmt.Sprintf(`
UPDATE tenant_vps_instance SET active = FALSE, deactivated_at = NOW()
WHERE billing_year = $1 AND billing_month = $2 AND service_provider = $3 AND provider_instance_id = $4
AND tenant_id NOT IN (%s)
`, strings.Join(tenantIdsStrings, ", ")),
billingYear, billingMonth, instance.ServiceProvider, instance.ProviderInstanceId,
)
return nil
}
func (model *DBModel) rowToVPSInstance(row RowScanner) (*VPSInstance, error) {
var serviceProvider string
@ -422,3 +559,17 @@ func (model *DBModel) rowToVPSInstance(row RowScanner) (*VPSInstance, error) {
Deleted: deleted,
}, nil
}
func domainNamesToString(slice []string) string {
sort.Strings(slice)
return strings.Join(slice, ",")
}
func portRangesToString(slice []PortRange) string {
rangeStrings := make([]string, len(slice))
for i, portRange := range slice {
rangeStrings[i] = fmt.Sprintf("%d:%d", portRange.Start, portRange.End)
}
sort.Strings(rangeStrings)
return strings.Join(rangeStrings, ",")
}
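As a quick illustration of the canonical-string comparison that DeepEquals relies on, here is a self-contained sketch. The canonicalDomains/canonicalPorts names and the sample values are illustrative, and unlike the helpers above it sorts a copy rather than the caller's slice.

package main

import (
	"fmt"
	"sort"
	"strings"
)

type PortRange struct{ Start, End int }

func canonicalDomains(domains []string) string {
	copied := append([]string{}, domains...) // sort a copy so the caller's slice is untouched
	sort.Strings(copied)
	return strings.Join(copied, ",")
}

func canonicalPorts(ranges []PortRange) string {
	parts := make([]string, len(ranges))
	for i, r := range ranges {
		parts[i] = fmt.Sprintf("%d:%d", r.Start, r.End)
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}

func main() {
	a := []string{"a.example.com", "b.example.com"}
	b := []string{"b.example.com", "a.example.com"} // same set, different order
	fmt.Println(canonicalDomains(a) == canonicalDomains(b)) // true
	fmt.Println(canonicalPorts([]PortRange{{8000, 8005}}))  // "8000:8005"
}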

schema_versions/02_up_create_tenants_etc.sql  +1 -1

@ -47,7 +47,7 @@ CREATE TABLE tenant_vps_instance (
CONSTRAINT vps_instance
FOREIGN KEY(service_provider, provider_instance_id)
REFERENCES vps_instances(service_provider, provider_instance_id) ON DELETE RESTRICT,
PRIMARY KEY (billing_year, billing_month, tenant_id, service_provider, provider_instance_id)
CONSTRAINT primary_key PRIMARY KEY (billing_year, billing_month, tenant_id, service_provider, provider_instance_id)
);
CREATE TABLE tenant_metrics_bandwidth (
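The composite primary key is now declared as a named constraint (primary_key), presumably so that the upsert in SaveInstanceConfiguration can target it by name with ON CONFLICT ON CONSTRAINT primary_key rather than repeating the five key columns.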

