Merge branch 'master' of github.com:uber/terraform into issue-4895
commit 6e46ef1d46

@ -30,10 +30,12 @@ BUG FIXES:

* core: Fix race condition when handling tainted resource destroys [GH-5026]
* provider/openstack: Fix crash when `access_network` was not defined in instances [GH-4966]
* provider/aws: Fix DynamoDB Table Refresh to ensure deleted tables are removed from state [GH-4943]
* provider/aws: Fix issue applying security group changes in EC2 Classic RDS for aws_db_instance [GH-4969]
* provider/cloudflare: `ttl` no longer shows a change on each plan on `cloudflare_record` resources [GH-5042]
* provider/aws: Fix reading auto scaling group load balancers [GH-5045]
* provider/aws: Fix reading auto scaling group availability zones [GH-5044]
* provider/google: Fix backend service max_utilization attribute [GH-4895]
* provider/docker: Fix the default docker_host value [GH-5088]
* provider/google: Fix backend service max_utilization attribute [GH-5075]

## 0.6.11 (February 1, 2016)
@ -1,34 +0,0 @@
package aws

import (
    "github.com/hashicorp/terraform/helper/schema"

    "github.com/aws/aws-sdk-go/aws"
)

func makeAwsStringList(in []interface{}) []*string {
    ret := make([]*string, len(in), len(in))
    for i := 0; i < len(in); i++ {
        ret[i] = aws.String(in[i].(string))
    }
    return ret
}

func makeAwsStringSet(in *schema.Set) []*string {
    inList := in.List()
    ret := make([]*string, len(inList), len(inList))
    for i := 0; i < len(ret); i++ {
        ret[i] = aws.String(inList[i].(string))
    }
    return ret
}

func unwrapAwsStringList(in []*string) []string {
    ret := make([]string, len(in), len(in))
    for i := 0; i < len(in); i++ {
        if in[i] != nil {
            ret[i] = *in[i]
        }
    }
    return ret
}
@ -271,11 +271,11 @@ func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWo
    d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps)
    d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps)
    d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn)
    d.Set("custom_security_group_ids", unwrapAwsStringList(layer.CustomSecurityGroupIds))
    d.Set("custom_security_group_ids", flattenStringList(layer.CustomSecurityGroupIds))
    d.Set("auto_healing", layer.EnableAutoHealing)
    d.Set("install_updates_on_boot", layer.InstallUpdatesOnBoot)
    d.Set("name", layer.Name)
    d.Set("system_packages", unwrapAwsStringList(layer.Packages))
    d.Set("system_packages", flattenStringList(layer.Packages))
    d.Set("stack_id", layer.StackId)
    d.Set("use_ebs_optimized_instances", layer.UseEbsOptimizedInstances)
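The read path above switches from the removed unwrapAwsStringList helper to flattenStringList, whose definition is not part of this diff. As a hedged sketch only (an assumption based on the helper it replaces, not the actual implementation), it presumably turns the AWS SDK's []*string into the []interface{} that d.Set accepts:

```go
package aws

// Assumed shape only -- the real flattenStringList lives in the provider's
// shared structure helpers and is not shown in this excerpt. It is the
// read-path counterpart of the removed unwrapAwsStringList.
func flattenStringList(list []*string) []interface{} {
    vs := make([]interface{}, 0, len(list))
    for _, v := range list {
        if v != nil {
            vs = append(vs, *v)
        }
    }
    return vs
}
```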
@ -298,12 +298,12 @@ func (lt *opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.Ops
        AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
        CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
        CustomRecipes:               lt.CustomRecipes(d),
        CustomSecurityGroupIds:      makeAwsStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
        CustomSecurityGroupIds:      expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
        EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
        InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
        LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
        Name:                        aws.String(d.Get("name").(string)),
        Packages:                    makeAwsStringSet(d.Get("system_packages").(*schema.Set)),
        Packages:                    expandStringSet(d.Get("system_packages").(*schema.Set)),
        Type:                        aws.String(lt.TypeName),
        StackId:                     aws.String(d.Get("stack_id").(string)),
        UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
@ -339,12 +339,12 @@ func (lt *opsworksLayerType) Update(d *schema.ResourceData, client *opsworks.Ops
        AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
        CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
        CustomRecipes:               lt.CustomRecipes(d),
        CustomSecurityGroupIds:      makeAwsStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
        CustomSecurityGroupIds:      expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
        EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
        InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
        LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
        Name:                        aws.String(d.Get("name").(string)),
        Packages:                    makeAwsStringSet(d.Get("system_packages").(*schema.Set)),
        Packages:                    expandStringSet(d.Get("system_packages").(*schema.Set)),
        UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
        Attributes:                  lt.AttributeMap(d),
        VolumeConfigurations:        lt.VolumeConfigurations(d),
@ -467,11 +467,11 @@ func (lt *opsworksLayerType) SetLifecycleEventConfiguration(d *schema.ResourceDa

func (lt *opsworksLayerType) CustomRecipes(d *schema.ResourceData) *opsworks.Recipes {
    return &opsworks.Recipes{
        Configure: makeAwsStringList(d.Get("custom_configure_recipes").([]interface{})),
        Deploy:    makeAwsStringList(d.Get("custom_deploy_recipes").([]interface{})),
        Setup:     makeAwsStringList(d.Get("custom_setup_recipes").([]interface{})),
        Shutdown:  makeAwsStringList(d.Get("custom_shutdown_recipes").([]interface{})),
        Undeploy:  makeAwsStringList(d.Get("custom_undeploy_recipes").([]interface{})),
        Configure: expandStringList(d.Get("custom_configure_recipes").([]interface{})),
        Deploy:    expandStringList(d.Get("custom_deploy_recipes").([]interface{})),
        Setup:     expandStringList(d.Get("custom_setup_recipes").([]interface{})),
        Shutdown:  expandStringList(d.Get("custom_shutdown_recipes").([]interface{})),
        Undeploy:  expandStringList(d.Get("custom_undeploy_recipes").([]interface{})),
    }
}

@ -487,11 +487,11 @@ func (lt *opsworksLayerType) SetCustomRecipes(d *schema.ResourceData, v *opswork
        return
    }

    d.Set("custom_configure_recipes", unwrapAwsStringList(v.Configure))
    d.Set("custom_deploy_recipes", unwrapAwsStringList(v.Deploy))
    d.Set("custom_setup_recipes", unwrapAwsStringList(v.Setup))
    d.Set("custom_shutdown_recipes", unwrapAwsStringList(v.Shutdown))
    d.Set("custom_undeploy_recipes", unwrapAwsStringList(v.Undeploy))
    d.Set("custom_configure_recipes", flattenStringList(v.Configure))
    d.Set("custom_deploy_recipes", flattenStringList(v.Deploy))
    d.Set("custom_setup_recipes", flattenStringList(v.Setup))
    d.Set("custom_shutdown_recipes", flattenStringList(v.Shutdown))
    d.Set("custom_undeploy_recipes", flattenStringList(v.Undeploy))
}

func (lt *opsworksLayerType) VolumeConfigurations(d *schema.ResourceData) []*opsworks.VolumeConfiguration {
@ -369,12 +369,20 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
        opts.StorageType = aws.String(attr.(string))
    }

    log.Printf("[DEBUG] DB Instance restore from snapshot configuration: %s", opts)
    _, err := conn.RestoreDBInstanceFromDBSnapshot(&opts)
    if err != nil {
        return fmt.Errorf("Error creating DB Instance: %s", err)
    }

    var sgUpdate bool
    if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
        sgUpdate = true
    }
    if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
        sgUpdate = true
    }
    if sgUpdate {
        log.Printf("[INFO] DB is restoring from snapshot with default security, but custom security should be set, will now update after snapshot is restored!")

        // wait for instance to get up and then modify security
@ -775,7 +783,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
        requestUpdate = true
    }

    if d.HasChange("vpc_security_group_ids") {
    if d.HasChange("security_group_names") {
        if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
            var s []*string
            for _, v := range attr.List() {
@ -355,7 +355,7 @@ func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) er
        }
    }

    // Create the new records
    // Change batch for deleting
    changeBatch := &route53.ChangeBatch{
        Comment: aws.String("Deleted by Terraform"),
        Changes: []*route53.Change{
@ -258,6 +258,58 @@ func TestAccAWSRoute53Record_TypeChange(t *testing.T) {
    })
}

// Test record deletion out of band and make sure we render a new plan
// Part of regression test(s) for https://github.com/hashicorp/terraform/pull/4892
func TestAccAWSRoute53Record_planUpdate(t *testing.T) {
    var zone route53.GetHostedZoneOutput
    resource.Test(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckRoute53RecordDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
                Config: testAccRoute53RecordConfig,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckRoute53RecordExists("aws_route53_record.default"),
                    testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone),
                ),
            },
            resource.TestStep{
                Config: testAccRoute53RecordConfig,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckRoute53DeleteRecord("aws_route53_record.default", &zone),
                ),
                ExpectNonEmptyPlan: true,
            },
            resource.TestStep{
                Config: testAccRoute53RecordNoConfig,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone),
                ),
            },
        },
    })
}

func testAccCheckRoute53DeleteRecord(n string, zone *route53.GetHostedZoneOutput) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        rs, ok := s.RootModule().Resources[n]
        if !ok {
            return fmt.Errorf("Not found: %s", n)
        }

        if rs.Primary.ID == "" {
            return fmt.Errorf("No hosted zone ID is set")
        }

        // Manually set the weight to 0 to replicate a record created in Terraform
        // pre-0.6.9
        rs.Primary.Attributes["weight"] = "0"

        return nil
    }
}

func testAccCheckRoute53RecordDestroy(s *terraform.State) error {
    conn := testAccProvider.Meta().(*AWSClient).r53conn
    for _, rs := range s.RootModule().Resources {
@ -354,6 +406,12 @@ resource "aws_route53_record" "default" {
}
`

const testAccRoute53RecordNoConfig = `
resource "aws_route53_zone" "main" {
    name = "notexample.com"
}
`

const testAccRoute53RecordConfigSuffix = `
resource "aws_route53_zone" "main" {
    name = "notexample.com"
@ -470,6 +470,11 @@ func expandStringList(configured []interface{}) []*string {
    return vs
}

// Takes the result of schema.Set of strings and returns a []*string
func expandStringSet(configured *schema.Set) []*string {
    return expandStringList(configured.List())
}

// Takes list of pointers to strings. Expand to an array
// of raw strings and returns a []interface{}
// to keep compatibility w/ schema.NewSet
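For context, a minimal, self-contained sketch of how the new set helper is used on the write path. The helpers are restated here from their use in this diff (the body of expandStringList is only partially shown above, so treat it as an approximation), and the security-group IDs are illustrative values, not taken from this commit:

```go
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/hashicorp/terraform/helper/schema"
)

// expandStringList converts the []interface{} produced by Terraform's schema
// layer into the []*string the AWS SDK expects (approximated from the diff).
func expandStringList(configured []interface{}) []*string {
    vs := make([]*string, 0, len(configured))
    for _, v := range configured {
        vs = append(vs, aws.String(v.(string)))
    }
    return vs
}

// expandStringSet is the helper added in this diff: it simply flattens a
// *schema.Set and reuses expandStringList.
func expandStringSet(configured *schema.Set) []*string {
    return expandStringList(configured.List())
}

func main() {
    s := schema.NewSet(schema.HashString, []interface{}{"sg-1234", "sg-5678"})
    for _, id := range expandStringSet(s) {
        fmt.Println(*id)
    }
}
```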
@ -13,7 +13,7 @@ func Provider() terraform.ResourceProvider {
            "host": &schema.Schema{
                Type:        schema.TypeString,
                Required:    true,
                DefaultFunc: schema.EnvDefaultFunc("DOCKER_HOST", "unix:/run/docker.sock"),
                DefaultFunc: schema.EnvDefaultFunc("DOCKER_HOST", "unix:///var/run/docker.sock"),
                Description: "The Docker daemon address",
            },
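schema.EnvDefaultFunc returns a default-value function that prefers the named environment variable and falls back to the literal, so the fix above only changes the fallback to the standard Docker socket path. A minimal sketch of that behaviour:

```go
package main

import (
    "fmt"

    "github.com/hashicorp/terraform/helper/schema"
)

func main() {
    // Resolves to $DOCKER_HOST when it is set in the environment,
    // otherwise to the corrected default "unix:///var/run/docker.sock".
    df := schema.EnvDefaultFunc("DOCKER_HOST", "unix:///var/run/docker.sock")

    v, err := df()
    if err != nil {
        panic(err)
    }
    fmt.Println(v)
}
```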
@ -280,6 +280,32 @@ func TestResourceProvider_linuxCreateConfigFiles(t *testing.T) {
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`,
        },
    },

    "Attributes JSON": {
        Config: testConfig(t, map[string]interface{}{
            "attributes_json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` +
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2"}`,
            "node_name":              "nodename1",
            "prevent_sudo":           true,
            "run_list":               []interface{}{"cookbook::recipe"},
            "secret_key_path":        "test-fixtures/encrypted_data_bag_secret",
            "server_url":             "https://chef.local",
            "validation_client_name": "validator",
            "validation_key_path":    "test-fixtures/validator.pem",
        }),

        Commands: map[string]bool{
            "mkdir -p " + linuxConfDir: true,
        },

        Uploads: map[string]string{
            linuxConfDir + "/client.rb":                 defaultLinuxClientConf,
            linuxConfDir + "/encrypted_data_bag_secret": "SECRET-KEY-FILE",
            linuxConfDir + "/validation.pem":            "VALIDATOR-PEM-FILE",
            linuxConfDir + "/first-boot.json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` +
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`,
        },
    },
}

r := new(ResourceProvisioner)
@ -24,16 +24,18 @@ import(
)

const (
    clienrb        = "client.rb"
    defaultEnv     = "_default"
    firstBoot      = "first-boot.json"
    logfileDir     = "logfiles"
    linuxChefCmd   = "chef-client"
    linuxConfDir   = "/etc/chef"
    secretKey      = "encrypted_data_bag_secret"
    validationKey  = "validation.pem"
    windowsChefCmd = "cmd /c chef-client"
    windowsConfDir = "C:/chef"
    clienrb         = "client.rb"
    defaultEnv      = "_default"
    firstBoot       = "first-boot.json"
    logfileDir      = "logfiles"
    linuxChefCmd    = "chef-client"
    linuxKnifeCmd   = "knife"
    linuxConfDir    = "/etc/chef"
    secretKey       = "encrypted_data_bag_secret"
    validationKey   = "validation.pem"
    windowsChefCmd  = "cmd /c chef-client"
    windowsKnifeCmd = "cmd /c knife"
    windowsConfDir  = "C:/chef"
)

const clientConf = `
@ -74,34 +76,37 @@ ENV['no_proxy'] = "{{ join .NOProxy "," }}"

// Provisioner represents a specificly configured chef provisioner
type Provisioner struct {
    Attributes           interface{} `mapstructure:"attributes"`
    ClientOptions        []string    `mapstructure:"client_options"`
    DisableReporting     bool        `mapstructure:"disable_reporting"`
    Environment          string      `mapstructure:"environment"`
    LogToFile            bool        `mapstructure:"log_to_file"`
    UsePolicyfile        bool        `mapstructure:"use_policyfile"`
    PolicyGroup          string      `mapstructure:"policy_group"`
    PolicyName           string      `mapstructure:"policy_name"`
    HTTPProxy            string      `mapstructure:"http_proxy"`
    HTTPSProxy           string      `mapstructure:"https_proxy"`
    NOProxy              []string    `mapstructure:"no_proxy"`
    NodeName             string      `mapstructure:"node_name"`
    OhaiHints            []string    `mapstructure:"ohai_hints"`
    OSType               string      `mapstructure:"os_type"`
    PreventSudo          bool        `mapstructure:"prevent_sudo"`
    RunList              []string    `mapstructure:"run_list"`
    SecretKey            string      `mapstructure:"secret_key"`
    ServerURL            string      `mapstructure:"server_url"`
    SkipInstall          bool        `mapstructure:"skip_install"`
    SSLVerifyMode        string      `mapstructure:"ssl_verify_mode"`
    ValidationClientName string      `mapstructure:"validation_client_name"`
    ValidationKey        string      `mapstructure:"validation_key"`
    Version              string      `mapstructure:"version"`
    Attributes            interface{} `mapstructure:"attributes"`
    AttributesJSON        string      `mapstructure:"attributes_json"`
    ClientOptions         []string    `mapstructure:"client_options"`
    DisableReporting      bool        `mapstructure:"disable_reporting"`
    Environment           string      `mapstructure:"environment"`
    FetchChefCertificates bool        `mapstructure:"fetch_chef_certificates"`
    LogToFile             bool        `mapstructure:"log_to_file"`
    UsePolicyfile         bool        `mapstructure:"use_policyfile"`
    PolicyGroup           string      `mapstructure:"policy_group"`
    PolicyName            string      `mapstructure:"policy_name"`
    HTTPProxy             string      `mapstructure:"http_proxy"`
    HTTPSProxy            string      `mapstructure:"https_proxy"`
    NOProxy               []string    `mapstructure:"no_proxy"`
    NodeName              string      `mapstructure:"node_name"`
    OhaiHints             []string    `mapstructure:"ohai_hints"`
    OSType                string      `mapstructure:"os_type"`
    PreventSudo           bool        `mapstructure:"prevent_sudo"`
    RunList               []string    `mapstructure:"run_list"`
    SecretKey             string      `mapstructure:"secret_key"`
    ServerURL             string      `mapstructure:"server_url"`
    SkipInstall           bool        `mapstructure:"skip_install"`
    SSLVerifyMode         string      `mapstructure:"ssl_verify_mode"`
    ValidationClientName  string      `mapstructure:"validation_client_name"`
    ValidationKey         string      `mapstructure:"validation_key"`
    Version               string      `mapstructure:"version"`

    installChefClient func(terraform.UIOutput, communicator.Communicator) error
    createConfigFiles func(terraform.UIOutput, communicator.Communicator) error
    runChefClient     func(terraform.UIOutput, communicator.Communicator) error
    useSudo           bool
    installChefClient     func(terraform.UIOutput, communicator.Communicator) error
    createConfigFiles     func(terraform.UIOutput, communicator.Communicator) error
    fetchChefCertificates func(terraform.UIOutput, communicator.Communicator) error
    runChefClient         func(terraform.UIOutput, communicator.Communicator) error
    useSudo               bool

    // Deprecated Fields
    SecretKeyPath string `mapstructure:"secret_key_path"`
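The new AttributesJSON and FetchChefCertificates fields are wired in through the same mapstructure tags as the existing fields. A rough, standalone sketch of how such a config map decodes into the struct, assuming the provisioner keeps using github.com/mitchellh/mapstructure (the decoding call itself is not shown in this excerpt, so this is illustrative only):

```go
package main

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
)

// provisioner is a cut-down stand-in for the real Provisioner struct,
// carrying only the fields relevant to this diff.
type provisioner struct {
    AttributesJSON        string `mapstructure:"attributes_json"`
    FetchChefCertificates bool   `mapstructure:"fetch_chef_certificates"`
    NodeName              string `mapstructure:"node_name"`
}

func main() {
    raw := map[string]interface{}{
        "attributes_json":         `{"key2":"value2"}`,
        "fetch_chef_certificates": true,
        "node_name":               "nodename1",
    }

    var p provisioner
    if err := mapstructure.Decode(raw, &p); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", p)
}
```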
@ -138,11 +143,13 @@ func (r *ResourceProvisioner) Apply(
    case "linux":
        p.installChefClient = p.linuxInstallChefClient
        p.createConfigFiles = p.linuxCreateConfigFiles
        p.fetchChefCertificates = p.fetchChefCertificatesFunc(linuxKnifeCmd, linuxConfDir)
        p.runChefClient = p.runChefClientFunc(linuxChefCmd, linuxConfDir)
        p.useSudo = !p.PreventSudo && s.Ephemeral.ConnInfo["user"] != "root"
    case "windows":
        p.installChefClient = p.windowsInstallChefClient
        p.createConfigFiles = p.windowsCreateConfigFiles
        p.fetchChefCertificates = p.fetchChefCertificatesFunc(windowsKnifeCmd, windowsConfDir)
        p.runChefClient = p.runChefClientFunc(windowsChefCmd, windowsConfDir)
        p.useSudo = false
    default:
@ -176,6 +183,13 @@ func (r *ResourceProvisioner) Apply(
        return err
    }

    if p.FetchChefCertificates {
        o.Output("Fetch Chef certificates...")
        if err := p.fetchChefCertificates(o, comm); err != nil {
            return err
        }
    }

    o.Output("Starting initial Chef-Client run...")
    if err := p.runChefClient(o, comm); err != nil {
        return err
@ -222,6 +236,10 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string
        ws = append(ws, "secret_key_path is deprecated, please use "+
            "secret_key instead and load the key contents via file()")
    }
    if _, ok := c.Config["attributes"]; ok {
        ws = append(ws, "using map style attribute values is deprecated, "+
            " please use a single raw JSON string instead")
    }

    return ws, es
}
@ -286,6 +304,14 @@ func (r *ResourceProvisioner) decodeConfig(c *terraform.ResourceConfig) (*Provis
        }
    }

    if attrs, ok := c.Config["attributes_json"]; ok {
        var m map[string]interface{}
        if err := json.Unmarshal([]byte(attrs.(string)), &m); err != nil {
            return nil, fmt.Errorf("Error parsing the attributes: %v", err)
        }
        p.Attributes = m
    }

    return p, nil
}

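A minimal standalone sketch of the attributes_json handling added above: the raw JSON string is unmarshalled into a map before being handed to Chef as node attributes. The sample JSON mirrors the test fixtures elsewhere in this diff:

```go
package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    attrs := `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"]}},"key2":"value2"}`

    var m map[string]interface{}
    if err := json.Unmarshal([]byte(attrs), &m); err != nil {
        // decodeConfig returns a similar error when the JSON is invalid.
        panic(fmt.Errorf("Error parsing the attributes: %v", err))
    }
    fmt.Println(m["key2"]) // value2
}
```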
@ -306,7 +332,7 @@ func rawToJSON(raw interface{}) (interface{}, error) {

        return s[0], nil
    default:
        return raw, nil
        return s, nil
    }
}

@ -328,6 +354,17 @@ func retryFunc(timeout time.Duration, f func() error) error {
    }
}

func (p *Provisioner) fetchChefCertificatesFunc(
    knifeCmd string,
    confDir string) func(terraform.UIOutput, communicator.Communicator) error {
    return func(o terraform.UIOutput, comm communicator.Communicator) error {
        clientrb := path.Join(confDir, clienrb)
        cmd := fmt.Sprintf("%s ssl fetch -c %s", knifeCmd, clientrb)

        return p.runCommand(o, comm, cmd)
    }
}

func (p *Provisioner) runChefClientFunc(
    chefCmd string,
    confDir string) func(terraform.UIOutput, communicator.Communicator) error {
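The closure above only builds and runs a single knife command. A small sketch of the string it produces for the Linux defaults, matching what the new acceptance test expects:

```go
package main

import (
    "fmt"
    "path"
)

func main() {
    // Constants copied from the diff above.
    const (
        linuxKnifeCmd = "knife"
        linuxConfDir  = "/etc/chef"
        clienrb       = "client.rb"
    )

    clientrb := path.Join(linuxConfDir, clienrb)
    cmd := fmt.Sprintf("%s ssl fetch -c %s", linuxKnifeCmd, clientrb)

    fmt.Println(cmd) // knife ssl fetch -c /etc/chef/client.rb
}
```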
@ -16,7 +16,6 @@ func TestResourceProvisioner_impl(t *testing.T) {

func TestResourceProvider_Validate_good(t *testing.T) {
    c := testConfig(t, map[string]interface{}{
        "attributes":  []interface{}{"key1 { subkey1 = value1 }"},
        "environment": "_default",
        "node_name":   "nodename1",
        "run_list":    []interface{}{"cookbook::recipe"},
@ -149,3 +148,76 @@ func TestResourceProvider_runChefClient(t *testing.T) {
        }
    }
}

func TestResourceProvider_fetchChefCertificates(t *testing.T) {
    cases := map[string]struct {
        Config   *terraform.ResourceConfig
        KnifeCmd string
        ConfDir  string
        Commands map[string]bool
    }{
        "Sudo": {
            Config: testConfig(t, map[string]interface{}{
                "fetch_chef_certificates": true,
                "node_name":               "nodename1",
                "run_list":                []interface{}{"cookbook::recipe"},
                "server_url":              "https://chef.local",
                "validation_client_name":  "validator",
                "validation_key_path":     "test-fixtures/validator.pem",
            }),

            KnifeCmd: linuxKnifeCmd,

            ConfDir: linuxConfDir,

            Commands: map[string]bool{
                fmt.Sprintf(`sudo %s ssl fetch -c %s`,
                    linuxKnifeCmd,
                    path.Join(linuxConfDir, "client.rb")): true,
            },
        },

        "NoSudo": {
            Config: testConfig(t, map[string]interface{}{
                "fetch_chef_certificates": true,
                "node_name":               "nodename1",
                "prevent_sudo":            true,
                "run_list":                []interface{}{"cookbook::recipe"},
                "server_url":              "https://chef.local",
                "validation_client_name":  "validator",
                "validation_key_path":     "test-fixtures/validator.pem",
            }),

            KnifeCmd: windowsKnifeCmd,

            ConfDir: windowsConfDir,

            Commands: map[string]bool{
                fmt.Sprintf(`%s ssl fetch -c %s`,
                    windowsKnifeCmd,
                    path.Join(windowsConfDir, "client.rb")): true,
            },
        },
    }

    r := new(ResourceProvisioner)
    o := new(terraform.MockUIOutput)
    c := new(communicator.MockCommunicator)

    for k, tc := range cases {
        c.Commands = tc.Commands

        p, err := r.decodeConfig(tc.Config)
        if err != nil {
            t.Fatalf("Error: %v", err)
        }

        p.fetchChefCertificates = p.fetchChefCertificatesFunc(tc.KnifeCmd, tc.ConfDir)
        p.useSudo = !p.PreventSudo

        err = p.fetchChefCertificates(o, c)
        if err != nil {
            t.Fatalf("Test %q failed: %v", k, err)
        }
    }
}
@ -196,6 +196,31 @@ func TestResourceProvider_windowsCreateConfigFiles(t *testing.T) {
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`,
        },
    },

    "Attributes JSON": {
        Config: testConfig(t, map[string]interface{}{
            "attributes_json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` +
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2"}`,
            "node_name":              "nodename1",
            "run_list":               []interface{}{"cookbook::recipe"},
            "secret_key_path":        "test-fixtures/encrypted_data_bag_secret",
            "server_url":             "https://chef.local",
            "validation_client_name": "validator",
            "validation_key_path":    "test-fixtures/validator.pem",
        }),

        Commands: map[string]bool{
            fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir): true,
        },

        Uploads: map[string]string{
            windowsConfDir + "/client.rb":                 defaultWindowsClientConf,
            windowsConfDir + "/encrypted_data_bag_secret": "SECRET-KEY-FILE",
            windowsConfDir + "/validation.pem":            "VALIDATOR-PEM-FILE",
            windowsConfDir + "/first-boot.json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` +
                `"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`,
        },
    },
}

r := new(ResourceProvisioner)
191  vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/LICENSE  generated  vendored  Normal file

@ -0,0 +1,191 @@
Apache License, Version 2.0, January 2004 (standard license text, http://www.apache.org/licenses/), Copyright 2015 Microsoft Corporation
27  vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/LICENSE  generated  vendored  Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved. (standard Go BSD-style license text)
22  vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/PATENTS  generated  vendored  Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents) (standard Go patent grant text)
25  vendor/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE  generated  vendored  Normal file

@ -0,0 +1,25 @@
Gocheck - A rich testing framework for Go. Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net> (BSD-style license text)
@ -0,0 +1,202 @@
Apache License, Version 2.0, January 2004 (standard license text with appendix, http://www.apache.org/licenses/)
@ -1,131 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Tests for CGI (the child process perspective)

package cgi

import (
    "testing"
)

func TestRequest(t *testing.T) {
    env := map[string]string{
        "SERVER_PROTOCOL": "HTTP/1.1",
        "REQUEST_METHOD":  "GET",
        "HTTP_HOST":       "example.com",
        "HTTP_REFERER":    "elsewhere",
        "HTTP_USER_AGENT": "goclient",
        "HTTP_FOO_BAR":    "baz",
        "REQUEST_URI":     "/path?a=b",
        "CONTENT_LENGTH":  "123",
        "CONTENT_TYPE":    "text/xml",
        "REMOTE_ADDR":     "5.6.7.8",
    }
    req, err := RequestFromMap(env)
    if err != nil {
        t.Fatalf("RequestFromMap: %v", err)
    }
    if g, e := req.UserAgent(), "goclient"; e != g {
        t.Errorf("expected UserAgent %q; got %q", e, g)
    }
    if g, e := req.Method, "GET"; e != g {
        t.Errorf("expected Method %q; got %q", e, g)
    }
    if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g {
        t.Errorf("expected Content-Type %q; got %q", e, g)
    }
    if g, e := req.ContentLength, int64(123); e != g {
        t.Errorf("expected ContentLength %d; got %d", e, g)
    }
    if g, e := req.Referer(), "elsewhere"; e != g {
        t.Errorf("expected Referer %q; got %q", e, g)
    }
    if req.Header == nil {
        t.Fatalf("unexpected nil Header")
    }
    if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g {
        t.Errorf("expected Foo-Bar %q; got %q", e, g)
    }
    if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g {
        t.Errorf("expected URL %q; got %q", e, g)
    }
    if g, e := req.FormValue("a"), "b"; e != g {
        t.Errorf("expected FormValue(a) %q; got %q", e, g)
    }
    if req.Trailer == nil {
        t.Errorf("unexpected nil Trailer")
    }
    if req.TLS != nil {
        t.Errorf("expected nil TLS")
    }
    if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
        t.Errorf("RemoteAddr: got %q; want %q", g, e)
    }
}

func TestRequestWithTLS(t *testing.T) {
    env := map[string]string{
        "SERVER_PROTOCOL": "HTTP/1.1",
        "REQUEST_METHOD":  "GET",
        "HTTP_HOST":       "example.com",
        "HTTP_REFERER":    "elsewhere",
        "REQUEST_URI":     "/path?a=b",
        "CONTENT_TYPE":    "text/xml",
        "HTTPS":           "1",
        "REMOTE_ADDR":     "5.6.7.8",
    }
    req, err := RequestFromMap(env)
    if err != nil {
        t.Fatalf("RequestFromMap: %v", err)
    }
    if g, e := req.URL.String(), "https://example.com/path?a=b"; e != g {
        t.Errorf("expected URL %q; got %q", e, g)
    }
    if req.TLS == nil {
        t.Errorf("expected non-nil TLS")
    }
}

func TestRequestWithoutHost(t *testing.T) {
    env := map[string]string{
        "SERVER_PROTOCOL": "HTTP/1.1",
        "HTTP_HOST":       "",
        "REQUEST_METHOD":  "GET",
        "REQUEST_URI":     "/path?a=b",
        "CONTENT_LENGTH":  "123",
    }
    req, err := RequestFromMap(env)
    if err != nil {
        t.Fatalf("RequestFromMap: %v", err)
    }
    if req.URL == nil {
        t.Fatalf("unexpected nil URL")
    }
    if g, e := req.URL.String(), "/path?a=b"; e != g {
        t.Errorf("URL = %q; want %q", g, e)
    }
}

func TestRequestWithoutRequestURI(t *testing.T) {
    env := map[string]string{
        "SERVER_PROTOCOL": "HTTP/1.1",
        "HTTP_HOST":       "example.com",
        "REQUEST_METHOD":  "GET",
        "SCRIPT_NAME":     "/dir/scriptname",
        "PATH_INFO":       "/p1/p2",
        "QUERY_STRING":    "a=1&b=2",
        "CONTENT_LENGTH":  "123",
    }
    req, err := RequestFromMap(env)
    if err != nil {
        t.Fatalf("RequestFromMap: %v", err)
    }
    if req.URL == nil {
        t.Fatalf("unexpected nil URL")
    }
    if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g {
        t.Errorf("URL = %q; want %q", g, e)
    }
}
@ -1,461 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Tests for package cgi
|
||||
|
||||
package cgi
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func newRequest(httpreq string) *http.Request {
|
||||
buf := bufio.NewReader(strings.NewReader(httpreq))
|
||||
req, err := http.ReadRequest(buf)
|
||||
if err != nil {
|
||||
panic("cgi: bogus http request in test: " + httpreq)
|
||||
}
|
||||
req.RemoteAddr = "1.2.3.4"
|
||||
return req
|
||||
}
|
||||
|
||||
func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder {
|
||||
rw := httptest.NewRecorder()
|
||||
req := newRequest(httpreq)
|
||||
h.ServeHTTP(rw, req)
|
||||
|
||||
// Make a map to hold the test map that the CGI returns.
|
||||
m := make(map[string]string)
|
||||
m["_body"] = rw.Body.String()
|
||||
linesRead := 0
|
||||
readlines:
|
||||
for {
|
||||
line, err := rw.Body.ReadString('\n')
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
break readlines
|
||||
case err != nil:
|
||||
t.Fatalf("unexpected error reading from CGI: %v", err)
|
||||
}
|
||||
linesRead++
|
||||
trimmedLine := strings.TrimRight(line, "\r\n")
|
||||
split := strings.SplitN(trimmedLine, "=", 2)
|
||||
if len(split) != 2 {
|
||||
t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v",
|
||||
len(split), linesRead, line, m)
|
||||
}
|
||||
m[split[0]] = split[1]
|
||||
}
|
||||
|
||||
for key, expected := range expectedMap {
|
||||
got := m[key]
|
||||
if key == "cwd" {
|
||||
// For Windows. golang.org/issue/4645.
|
||||
fi1, _ := os.Stat(got)
|
||||
fi2, _ := os.Stat(expected)
|
||||
if os.SameFile(fi1, fi2) {
|
||||
got = expected
|
||||
}
|
||||
}
|
||||
if got != expected {
|
||||
t.Errorf("for key %q got %q; expected %q", key, got, expected)
|
||||
}
|
||||
}
|
||||
return rw
|
||||
}
|
||||
|
||||
var cgiTested, cgiWorks bool
|
||||
|
||||
func check(t *testing.T) {
|
||||
if !cgiTested {
|
||||
cgiTested = true
|
||||
cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil
|
||||
}
|
||||
if !cgiWorks {
|
||||
// No Perl on Windows, needed by test.cgi
|
||||
// TODO: make the child process be Go, not Perl.
|
||||
t.Skip("Skipping test: test.cgi failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCGIBasicGet(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"test": "Hello CGI",
|
||||
"param-a": "b",
|
||||
"param-foo": "bar",
|
||||
"env-GATEWAY_INTERFACE": "CGI/1.1",
|
||||
"env-HTTP_HOST": "example.com",
|
||||
"env-PATH_INFO": "",
|
||||
"env-QUERY_STRING": "foo=bar&a=b",
|
||||
"env-REMOTE_ADDR": "1.2.3.4",
|
||||
"env-REMOTE_HOST": "1.2.3.4",
|
||||
"env-REQUEST_METHOD": "GET",
|
||||
"env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
|
||||
"env-SCRIPT_FILENAME": "testdata/test.cgi",
|
||||
"env-SCRIPT_NAME": "/test.cgi",
|
||||
"env-SERVER_NAME": "example.com",
|
||||
"env-SERVER_PORT": "80",
|
||||
"env-SERVER_SOFTWARE": "go",
|
||||
}
|
||||
replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
|
||||
if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected {
|
||||
t.Errorf("got a Content-Type of %q; expected %q", got, expected)
|
||||
}
|
||||
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
|
||||
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCGIBasicGetAbsPath(t *testing.T) {
|
||||
check(t)
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("getwd error: %v", err)
|
||||
}
|
||||
h := &Handler{
|
||||
Path: pwd + "/testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
|
||||
"env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi",
|
||||
"env-SCRIPT_NAME": "/test.cgi",
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestPathInfo(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"param-a": "b",
|
||||
"env-PATH_INFO": "/extrapath",
|
||||
"env-QUERY_STRING": "a=b",
|
||||
"env-REQUEST_URI": "/test.cgi/extrapath?a=b",
|
||||
"env-SCRIPT_FILENAME": "testdata/test.cgi",
|
||||
"env-SCRIPT_NAME": "/test.cgi",
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestPathInfoDirRoot(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/myscript/",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"env-PATH_INFO": "bar",
|
||||
"env-QUERY_STRING": "a=b",
|
||||
"env-REQUEST_URI": "/myscript/bar?a=b",
|
||||
"env-SCRIPT_FILENAME": "testdata/test.cgi",
|
||||
"env-SCRIPT_NAME": "/myscript/",
|
||||
}
|
||||
runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestDupHeaders(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"env-REQUEST_URI": "/myscript/bar?a=b",
|
||||
"env-SCRIPT_FILENAME": "testdata/test.cgi",
|
||||
"env-HTTP_COOKIE": "nom=NOM; yum=YUM",
|
||||
"env-HTTP_X_FOO": "val1, val2",
|
||||
}
|
||||
runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+
|
||||
"Cookie: nom=NOM\n"+
|
||||
"Cookie: yum=YUM\n"+
|
||||
"X-Foo: val1\n"+
|
||||
"X-Foo: val2\n"+
|
||||
"Host: example.com\n\n",
|
||||
expectedMap)
|
||||
}
|
||||
|
||||
func TestPathInfoNoRoot(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"env-PATH_INFO": "/bar",
|
||||
"env-QUERY_STRING": "a=b",
|
||||
"env-REQUEST_URI": "/bar?a=b",
|
||||
"env-SCRIPT_FILENAME": "testdata/test.cgi",
|
||||
"env-SCRIPT_NAME": "/",
|
||||
}
|
||||
runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestCGIBasicPost(t *testing.T) {
|
||||
check(t)
|
||||
postReq := `POST /test.cgi?a=b HTTP/1.0
|
||||
Host: example.com
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Length: 15
|
||||
|
||||
postfoo=postbar`
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"test": "Hello CGI",
|
||||
"param-postfoo": "postbar",
|
||||
"env-REQUEST_METHOD": "POST",
|
||||
"env-CONTENT_LENGTH": "15",
|
||||
"env-REQUEST_URI": "/test.cgi?a=b",
|
||||
}
|
||||
runCgiTest(t, h, postReq, expectedMap)
|
||||
}
|
||||
|
||||
func chunk(s string) string {
|
||||
return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
|
||||
}
|
||||
|
||||
// The CGI spec doesn't allow chunked requests.
|
||||
func TestCGIPostChunked(t *testing.T) {
|
||||
check(t)
|
||||
postReq := `POST /test.cgi?a=b HTTP/1.1
|
||||
Host: example.com
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Transfer-Encoding: chunked
|
||||
|
||||
` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("")
|
||||
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap := map[string]string{}
|
||||
resp := runCgiTest(t, h, postReq, expectedMap)
|
||||
if got, expected := resp.Code, http.StatusBadRequest; got != expected {
|
||||
t.Fatalf("Expected %v response code from chunked request body; got %d",
|
||||
expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRedirect(t *testing.T) {
|
||||
check(t)
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil)
|
||||
if e, g := 302, rec.Code; e != g {
|
||||
t.Errorf("expected status code %d; got %d", e, g)
|
||||
}
|
||||
if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g {
|
||||
t.Errorf("expected Location header of %q; got %q", e, g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalRedirect(t *testing.T) {
|
||||
check(t)
|
||||
baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path)
|
||||
fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr)
|
||||
})
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
PathLocationHandler: baseHandler,
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"basepath": "/foo",
|
||||
"remoteaddr": "1.2.3.4",
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
// TestCopyError tests that we kill the process if there's an error copying
|
||||
// its output. (for example, from the client having gone away)
|
||||
func TestCopyError(t *testing.T) {
|
||||
check(t)
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skipf("skipping test on %q", runtime.GOOS)
|
||||
}
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
ts := httptest.NewServer(h)
|
||||
defer ts.Close()
|
||||
|
||||
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
|
||||
err = req.Write(conn)
|
||||
if err != nil {
|
||||
t.Fatalf("Write: %v", err)
|
||||
}
|
||||
|
||||
res, err := http.ReadResponse(bufio.NewReader(conn), req)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadResponse: %v", err)
|
||||
}
|
||||
|
||||
pidstr := res.Header.Get("X-CGI-Pid")
|
||||
if pidstr == "" {
|
||||
t.Fatalf("expected an X-CGI-Pid header in response")
|
||||
}
|
||||
pid, err := strconv.Atoi(pidstr)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid X-CGI-Pid value")
|
||||
}
|
||||
|
||||
var buf [5000]byte
|
||||
n, err := io.ReadFull(res.Body, buf[:])
|
||||
if err != nil {
|
||||
t.Fatalf("ReadFull: %d bytes, %v", n, err)
|
||||
}
|
||||
|
||||
childRunning := func() bool {
|
||||
return isProcessRunning(t, pid)
|
||||
}
|
||||
|
||||
if !childRunning() {
|
||||
t.Fatalf("pre-conn.Close, expected child to be running")
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
tries := 0
|
||||
for tries < 25 && childRunning() {
|
||||
time.Sleep(50 * time.Millisecond * time.Duration(tries))
|
||||
tries++
|
||||
}
|
||||
if childRunning() {
|
||||
t.Fatalf("post-conn.Close, expected child to be gone")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirUnix(t *testing.T) {
|
||||
check(t)
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skipf("skipping test on %q", runtime.GOOS)
|
||||
}
|
||||
cwd, _ := os.Getwd()
|
||||
h := &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
Dir: cwd,
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"cwd": cwd,
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
|
||||
cwd, _ = os.Getwd()
|
||||
cwd = filepath.Join(cwd, "testdata")
|
||||
h = &Handler{
|
||||
Path: "testdata/test.cgi",
|
||||
Root: "/test.cgi",
|
||||
}
|
||||
expectedMap = map[string]string{
|
||||
"cwd": cwd,
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestDirWindows(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("Skipping windows specific test.")
|
||||
}
|
||||
|
||||
cgifile, _ := filepath.Abs("testdata/test.cgi")
|
||||
|
||||
var perl string
|
||||
var err error
|
||||
perl, err = exec.LookPath("perl")
|
||||
if err != nil {
|
||||
t.Skip("Skipping test: perl not found.")
|
||||
}
|
||||
perl, _ = filepath.Abs(perl)
|
||||
|
||||
cwd, _ := os.Getwd()
|
||||
h := &Handler{
|
||||
Path: perl,
|
||||
Root: "/test.cgi",
|
||||
Dir: cwd,
|
||||
Args: []string{cgifile},
|
||||
Env: []string{"SCRIPT_FILENAME=" + cgifile},
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"cwd": cwd,
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
|
||||
// If not specify Dir on windows, working directory should be
|
||||
// base directory of perl.
|
||||
cwd, _ = filepath.Split(perl)
|
||||
if cwd != "" && cwd[len(cwd)-1] == filepath.Separator {
|
||||
cwd = cwd[:len(cwd)-1]
|
||||
}
|
||||
h = &Handler{
|
||||
Path: perl,
|
||||
Root: "/test.cgi",
|
||||
Args: []string{cgifile},
|
||||
Env: []string{"SCRIPT_FILENAME=" + cgifile},
|
||||
}
|
||||
expectedMap = map[string]string{
|
||||
"cwd": cwd,
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
||||
|
||||
func TestEnvOverride(t *testing.T) {
|
||||
cgifile, _ := filepath.Abs("testdata/test.cgi")
|
||||
|
||||
var perl string
|
||||
var err error
|
||||
perl, err = exec.LookPath("perl")
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test: perl not found.")
|
||||
}
|
||||
perl, _ = filepath.Abs(perl)
|
||||
|
||||
cwd, _ := os.Getwd()
|
||||
h := &Handler{
|
||||
Path: perl,
|
||||
Root: "/test.cgi",
|
||||
Dir: cwd,
|
||||
Args: []string{cgifile},
|
||||
Env: []string{
|
||||
"SCRIPT_FILENAME=" + cgifile,
|
||||
"REQUEST_URI=/foo/bar"},
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"cwd": cwd,
|
||||
"env-SCRIPT_FILENAME": cgifile,
|
||||
"env-REQUEST_URI": "/foo/bar",
|
||||
}
|
||||
runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
}
|
|
@ -1,228 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Tests a Go CGI program running under a Go CGI host process.
|
||||
// Further, the two programs are the same binary, just checking
|
||||
// their environment to figure out what mode to run in.
|
||||
|
||||
package cgi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This test is a CGI host (testing host.go) that runs its own binary
|
||||
// as a child process testing the other half of CGI (child.go).
|
||||
func TestHostingOurselves(t *testing.T) {
|
||||
if runtime.GOOS == "nacl" {
|
||||
t.Skip("skipping on nacl")
|
||||
}
|
||||
|
||||
h := &Handler{
|
||||
Path: os.Args[0],
|
||||
Root: "/test.go",
|
||||
Args: []string{"-test.run=TestBeChildCGIProcess"},
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"test": "Hello CGI-in-CGI",
|
||||
"param-a": "b",
|
||||
"param-foo": "bar",
|
||||
"env-GATEWAY_INTERFACE": "CGI/1.1",
|
||||
"env-HTTP_HOST": "example.com",
|
||||
"env-PATH_INFO": "",
|
||||
"env-QUERY_STRING": "foo=bar&a=b",
|
||||
"env-REMOTE_ADDR": "1.2.3.4",
|
||||
"env-REMOTE_HOST": "1.2.3.4",
|
||||
"env-REQUEST_METHOD": "GET",
|
||||
"env-REQUEST_URI": "/test.go?foo=bar&a=b",
|
||||
"env-SCRIPT_FILENAME": os.Args[0],
|
||||
"env-SCRIPT_NAME": "/test.go",
|
||||
"env-SERVER_NAME": "example.com",
|
||||
"env-SERVER_PORT": "80",
|
||||
"env-SERVER_SOFTWARE": "go",
|
||||
}
|
||||
replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
|
||||
if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected {
|
||||
t.Errorf("got a Content-Type of %q; expected %q", got, expected)
|
||||
}
|
||||
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
|
||||
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
|
||||
}
|
||||
}
|
||||
|
||||
type customWriterRecorder struct {
|
||||
w io.Writer
|
||||
*httptest.ResponseRecorder
|
||||
}
|
||||
|
||||
func (r *customWriterRecorder) Write(p []byte) (n int, err error) {
|
||||
return r.w.Write(p)
|
||||
}
|
||||
|
||||
type limitWriter struct {
|
||||
w io.Writer
|
||||
n int
|
||||
}
|
||||
|
||||
func (w *limitWriter) Write(p []byte) (n int, err error) {
|
||||
if len(p) > w.n {
|
||||
p = p[:w.n]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
n, err = w.w.Write(p)
|
||||
w.n -= n
|
||||
}
|
||||
if w.n == 0 {
|
||||
err = errors.New("past write limit")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If there's an error copying the child's output to the parent, test
|
||||
// that we kill the child.
|
||||
func TestKillChildAfterCopyError(t *testing.T) {
|
||||
if runtime.GOOS == "nacl" {
|
||||
t.Skip("skipping on nacl")
|
||||
}
|
||||
|
||||
defer func() { testHookStartProcess = nil }()
|
||||
proc := make(chan *os.Process, 1)
|
||||
testHookStartProcess = func(p *os.Process) {
|
||||
proc <- p
|
||||
}
|
||||
|
||||
h := &Handler{
|
||||
Path: os.Args[0],
|
||||
Root: "/test.go",
|
||||
Args: []string{"-test.run=TestBeChildCGIProcess"},
|
||||
}
|
||||
req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
var out bytes.Buffer
|
||||
const writeLen = 50 << 10
|
||||
rw := &customWriterRecorder{&limitWriter{&out, writeLen}, rec}
|
||||
|
||||
donec := make(chan bool, 1)
|
||||
go func() {
|
||||
h.ServeHTTP(rw, req)
|
||||
donec <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-donec:
|
||||
if out.Len() != writeLen || out.Bytes()[0] != 'a' {
|
||||
t.Errorf("unexpected output: %q", out.Bytes())
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("timeout. ServeHTTP hung and didn't kill the child process?")
|
||||
select {
|
||||
case p := <-proc:
|
||||
p.Kill()
|
||||
t.Logf("killed process")
|
||||
default:
|
||||
t.Logf("didn't kill process")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a child handler writing only headers works.
|
||||
// golang.org/issue/7196
|
||||
func TestChildOnlyHeaders(t *testing.T) {
|
||||
if runtime.GOOS == "nacl" {
|
||||
t.Skip("skipping on nacl")
|
||||
}
|
||||
|
||||
h := &Handler{
|
||||
Path: os.Args[0],
|
||||
Root: "/test.go",
|
||||
Args: []string{"-test.run=TestBeChildCGIProcess"},
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"_body": "",
|
||||
}
|
||||
replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected {
|
||||
t.Errorf("got a X-Test-Header of %q; expected %q", got, expected)
|
||||
}
|
||||
}
|
||||
|
||||
// golang.org/issue/7198
|
||||
func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") }
|
||||
func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") }
|
||||
func Test500WithEmptyHeaders(t *testing.T) { want500Test(t, "/empty-headers") }
|
||||
|
||||
func want500Test(t *testing.T, path string) {
|
||||
h := &Handler{
|
||||
Path: os.Args[0],
|
||||
Root: "/test.go",
|
||||
Args: []string{"-test.run=TestBeChildCGIProcess"},
|
||||
}
|
||||
expectedMap := map[string]string{
|
||||
"_body": "",
|
||||
}
|
||||
replay := runCgiTest(t, h, "GET "+path+" HTTP/1.0\nHost: example.com\n\n", expectedMap)
|
||||
if replay.Code != 500 {
|
||||
t.Errorf("Got code %d; want 500", replay.Code)
|
||||
}
|
||||
}
|
||||
|
||||
type neverEnding byte
|
||||
|
||||
func (b neverEnding) Read(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
p[i] = byte(b)
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Note: not actually a test.
|
||||
func TestBeChildCGIProcess(t *testing.T) {
|
||||
if os.Getenv("REQUEST_METHOD") == "" {
|
||||
// Not in a CGI environment; skipping test.
|
||||
return
|
||||
}
|
||||
switch os.Getenv("REQUEST_URI") {
|
||||
case "/immediate-disconnect":
|
||||
os.Exit(0)
|
||||
case "/no-content-type":
|
||||
fmt.Printf("Content-Length: 6\n\nHello\n")
|
||||
os.Exit(0)
|
||||
case "/empty-headers":
|
||||
fmt.Printf("\nHello")
|
||||
os.Exit(0)
|
||||
}
|
||||
Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
rw.Header().Set("X-Test-Header", "X-Test-Value")
|
||||
req.ParseForm()
|
||||
if req.FormValue("no-body") == "1" {
|
||||
return
|
||||
}
|
||||
if req.FormValue("write-forever") == "1" {
|
||||
io.Copy(rw, neverEnding('a'))
|
||||
for {
|
||||
time.Sleep(5 * time.Second) // hang forever, until killed
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
|
||||
for k, vv := range req.Form {
|
||||
for _, v := range vv {
|
||||
fmt.Fprintf(rw, "param-%s=%s\n", k, v)
|
||||
}
|
||||
}
|
||||
for _, kv := range os.Environ() {
|
||||
fmt.Fprintf(rw, "env-%s\n", kv)
|
||||
}
|
||||
}))
|
||||
os.Exit(0)
|
||||
}
|
|
@ -1,18 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build plan9

package cgi

import (
	"os"
	"strconv"
	"testing"
)

func isProcessRunning(t *testing.T, pid int) bool {
	_, err := os.Stat("/proc/" + strconv.Itoa(pid))
	return err == nil
}
@ -1,21 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !plan9

package cgi

import (
	"os"
	"syscall"
	"testing"
)

func isProcessRunning(t *testing.T, pid int) bool {
	p, err := os.FindProcess(pid)
	if err != nil {
		return false
	}
	return p.Signal(syscall.Signal(0)) == nil
}
@ -1,91 +0,0 @@
#!/usr/bin/perl
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Test script run as a child process under cgi_test.go

use strict;
use Cwd;

binmode STDOUT;

my $q = MiniCGI->new;
my $params = $q->Vars;

if ($params->{"loc"}) {
	print "Location: $params->{loc}\r\n\r\n";
	exit(0);
}

print "Content-Type: text/html\r\n";
print "X-CGI-Pid: $$\r\n";
print "X-Test-Header: X-Test-Value\r\n";
print "\r\n";

if ($params->{"bigresponse"}) {
	# 17 MB, for OS X: golang.org/issue/4958
	for (1..(17 * 1024)) {
		print "A" x 1024, "\r\n";
	}
	exit 0;
}

print "test=Hello CGI\r\n";

foreach my $k (sort keys %$params) {
	print "param-$k=$params->{$k}\r\n";
}

foreach my $k (sort keys %ENV) {
	my $clean_env = $ENV{$k};
	$clean_env =~ s/[\n\r]//g;
	print "env-$k=$clean_env\r\n";
}

# NOTE: msys perl returns /c/go/src/... not C:\go\....
my $dir = getcwd();
if ($^O eq 'MSWin32' || $^O eq 'msys') {
	if ($dir =~ /^.:/) {
		$dir =~ s!/!\\!g;
	} else {
		my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
		$cmd =~ s!\\!/!g;
		$dir = `$cmd /c cd`;
		chomp $dir;
	}
}
print "cwd=$dir\r\n";

# A minimal version of CGI.pm, for people without the perl-modules
# package installed. (CGI.pm used to be part of the Perl core, but
# some distros now bundle perl-base and perl-modules separately...)
package MiniCGI;

sub new {
	my $class = shift;
	return bless {}, $class;
}

sub Vars {
	my $self = shift;
	my $pairs;
	if ($ENV{CONTENT_LENGTH}) {
		$pairs = do { local $/; <STDIN> };
	} else {
		$pairs = $ENV{QUERY_STRING};
	}
	my $vars = {};
	foreach my $kv (split(/&/, $pairs)) {
		my ($k, $v) = split(/=/, $kv, 2);
		$vars->{_urldecode($k)} = _urldecode($v);
	}
	return $vars;
}

sub _urldecode {
	my $v = shift;
	$v =~ tr/+/ /;
	$v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
	return $v;
}
@ -1,159 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This code is duplicated in net/http and net/http/httputil.
// Please make any changes in both files.

package http

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"testing"
)

func TestChunk(t *testing.T) {
	var b bytes.Buffer

	w := newChunkedWriter(&b)
	const chunk1 = "hello, "
	const chunk2 = "world! 0123456789abcdef"
	w.Write([]byte(chunk1))
	w.Write([]byte(chunk2))
	w.Close()

	if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
		t.Fatalf("chunk writer wrote %q; want %q", g, e)
	}

	r := newChunkedReader(&b)
	data, err := ioutil.ReadAll(r)
	if err != nil {
		t.Logf(`data: "%s"`, data)
		t.Fatalf("ReadAll from reader: %v", err)
	}
	if g, e := string(data), chunk1+chunk2; g != e {
		t.Errorf("chunk reader read %q; want %q", g, e)
	}
}

func TestChunkReadMultiple(t *testing.T) {
	// Bunch of small chunks, all read together.
	{
		var b bytes.Buffer
		w := newChunkedWriter(&b)
		w.Write([]byte("foo"))
		w.Write([]byte("bar"))
		w.Close()

		r := newChunkedReader(&b)
		buf := make([]byte, 10)
		n, err := r.Read(buf)
		if n != 6 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 6, EOF", n, err)
		}
		buf = buf[:n]
		if string(buf) != "foobar" {
			t.Errorf("Read = %q; want %q", buf, "foobar")
		}
	}

	// One big chunk followed by a little chunk, but the small bufio.Reader size
	// should prevent the second chunk header from being read.
	{
		var b bytes.Buffer
		w := newChunkedWriter(&b)
		// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
		// the same as the bufio ReaderSize below (the minimum), so even
		// though we're going to try to Read with a buffer larger enough to also
		// receive "foo", the second chunk header won't be read yet.
		const fillBufChunk = "0123456789a"
		const shortChunk = "foo"
		w.Write([]byte(fillBufChunk))
		w.Write([]byte(shortChunk))
		w.Close()

		r := newChunkedReader(bufio.NewReaderSize(&b, 16))
		buf := make([]byte, len(fillBufChunk)+len(shortChunk))
		n, err := r.Read(buf)
		if n != len(fillBufChunk) || err != nil {
			t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
		}
		buf = buf[:n]
		if string(buf) != fillBufChunk {
			t.Errorf("Read = %q; want %q", buf, fillBufChunk)
		}

		n, err = r.Read(buf)
		if n != len(shortChunk) || err != io.EOF {
			t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
		}
	}

	// And test that we see an EOF chunk, even though our buffer is already full:
	{
		r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
		buf := make([]byte, 3)
		n, err := r.Read(buf)
		if n != 3 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 3, EOF", n, err)
		}
		if string(buf) != "foo" {
			t.Errorf("buf = %q; want foo", buf)
		}
	}
}

func TestChunkReaderAllocs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	var buf bytes.Buffer
	w := newChunkedWriter(&buf)
	a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
	w.Write(a)
	w.Write(b)
	w.Write(c)
	w.Close()

	readBuf := make([]byte, len(a)+len(b)+len(c)+1)
	byter := bytes.NewReader(buf.Bytes())
	bufr := bufio.NewReader(byter)
	mallocs := testing.AllocsPerRun(100, func() {
		byter.Seek(0, 0)
		bufr.Reset(byter)
		r := newChunkedReader(bufr)
		n, err := io.ReadFull(r, readBuf)
		if n != len(readBuf)-1 {
			t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
		}
		if err != io.ErrUnexpectedEOF {
			t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
		}
	})
	if mallocs > 1.5 {
		t.Errorf("mallocs = %v; want 1", mallocs)
	}
}

func TestParseHexUint(t *testing.T) {
	for i := uint64(0); i <= 1234; i++ {
		line := []byte(fmt.Sprintf("%x", i))
		got, err := parseHexUint(line)
		if err != nil {
			t.Fatalf("on %d: %v", i, err)
		}
		if got != i {
			t.Errorf("for input %q = %d; want %d", line, got, i)
		}
	}
	_, err := parseHexUint([]byte("bogus"))
	if err == nil {
		t.Error("expected error on bogus input")
	}
}
File diff suppressed because it is too large
@ -1,380 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var writeSetCookiesTests = []struct {
|
||||
Cookie *Cookie
|
||||
Raw string
|
||||
}{
|
||||
{
|
||||
&Cookie{Name: "cookie-1", Value: "v$1"},
|
||||
"cookie-1=v$1",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
|
||||
"cookie-2=two; Max-Age=3600",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
|
||||
"cookie-3=three; Domain=example.com",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
|
||||
"cookie-4=four; Path=/restricted/",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-5", Value: "five", Domain: "wrong;bad.abc"},
|
||||
"cookie-5=five",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-6", Value: "six", Domain: "bad-.abc"},
|
||||
"cookie-6=six",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-7", Value: "seven", Domain: "127.0.0.1"},
|
||||
"cookie-7=seven; Domain=127.0.0.1",
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "cookie-8", Value: "eight", Domain: "::1"},
|
||||
"cookie-8=eight",
|
||||
},
|
||||
// The "special" cookies have values containing commas or spaces which
|
||||
// are disallowed by RFC 6265 but are common in the wild.
|
||||
{
|
||||
&Cookie{Name: "special-1", Value: "a z"},
|
||||
`special-1=a z`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-2", Value: " z"},
|
||||
`special-2=" z"`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-3", Value: "a "},
|
||||
`special-3="a "`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-4", Value: " "},
|
||||
`special-4=" "`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-5", Value: "a,z"},
|
||||
`special-5=a,z`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-6", Value: ",z"},
|
||||
`special-6=",z"`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-7", Value: "a,"},
|
||||
`special-7="a,"`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "special-8", Value: ","},
|
||||
`special-8=","`,
|
||||
},
|
||||
{
|
||||
&Cookie{Name: "empty-value", Value: ""},
|
||||
`empty-value=`,
|
||||
},
|
||||
}
|
||||
|
||||
func TestWriteSetCookies(t *testing.T) {
|
||||
defer log.SetOutput(os.Stderr)
|
||||
var logbuf bytes.Buffer
|
||||
log.SetOutput(&logbuf)
|
||||
|
||||
for i, tt := range writeSetCookiesTests {
|
||||
if g, e := tt.Cookie.String(), tt.Raw; g != e {
|
||||
t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if got, sub := logbuf.String(), "dropping domain attribute"; !strings.Contains(got, sub) {
|
||||
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
|
||||
}
|
||||
}
|
||||
|
||||
type headerOnlyResponseWriter Header
|
||||
|
||||
func (ho headerOnlyResponseWriter) Header() Header {
|
||||
return Header(ho)
|
||||
}
|
||||
|
||||
func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
|
||||
panic("NOIMPL")
|
||||
}
|
||||
|
||||
func (ho headerOnlyResponseWriter) WriteHeader(int) {
|
||||
panic("NOIMPL")
|
||||
}
|
||||
|
||||
func TestSetCookie(t *testing.T) {
|
||||
m := make(Header)
|
||||
SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"})
|
||||
SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600})
|
||||
if l := len(m["Set-Cookie"]); l != 2 {
|
||||
t.Fatalf("expected %d cookies, got %d", 2, l)
|
||||
}
|
||||
if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e {
|
||||
t.Errorf("cookie #1: want %q, got %q", e, g)
|
||||
}
|
||||
if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e {
|
||||
t.Errorf("cookie #2: want %q, got %q", e, g)
|
||||
}
|
||||
}
|
||||
|
||||
var addCookieTests = []struct {
|
||||
Cookies []*Cookie
|
||||
Raw string
|
||||
}{
|
||||
{
|
||||
[]*Cookie{},
|
||||
"",
|
||||
},
|
||||
{
|
||||
[]*Cookie{{Name: "cookie-1", Value: "v$1"}},
|
||||
"cookie-1=v$1",
|
||||
},
|
||||
{
|
||||
[]*Cookie{
|
||||
{Name: "cookie-1", Value: "v$1"},
|
||||
{Name: "cookie-2", Value: "v$2"},
|
||||
{Name: "cookie-3", Value: "v$3"},
|
||||
},
|
||||
"cookie-1=v$1; cookie-2=v$2; cookie-3=v$3",
|
||||
},
|
||||
}
|
||||
|
||||
func TestAddCookie(t *testing.T) {
|
||||
for i, tt := range addCookieTests {
|
||||
req, _ := NewRequest("GET", "http://example.com/", nil)
|
||||
for _, c := range tt.Cookies {
|
||||
req.AddCookie(c)
|
||||
}
|
||||
if g := req.Header.Get("Cookie"); g != tt.Raw {
|
||||
t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var readSetCookiesTests = []struct {
|
||||
Header Header
|
||||
Cookies []*Cookie
|
||||
}{
|
||||
{
|
||||
Header{"Set-Cookie": {"Cookie-1=v$1"}},
|
||||
[]*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}},
|
||||
[]*Cookie{{
|
||||
Name: "NID",
|
||||
Value: "99=YsDT5i3E-CXax-",
|
||||
Path: "/",
|
||||
Domain: ".google.ch",
|
||||
HttpOnly: true,
|
||||
Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC),
|
||||
RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT",
|
||||
Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
|
||||
[]*Cookie{{
|
||||
Name: ".ASPXAUTH",
|
||||
Value: "7E3AA",
|
||||
Path: "/",
|
||||
Expires: time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC),
|
||||
RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT",
|
||||
HttpOnly: true,
|
||||
Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}},
|
||||
[]*Cookie{{
|
||||
Name: "ASP.NET_SessionId",
|
||||
Value: "foo",
|
||||
Path: "/",
|
||||
HttpOnly: true,
|
||||
Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly",
|
||||
}},
|
||||
},
|
||||
// Make sure we can properly read back the Set-Cookie headers we create
|
||||
// for values containing spaces or commas:
|
||||
{
|
||||
Header{"Set-Cookie": {`special-1=a z`}},
|
||||
[]*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-2=" z"`}},
|
||||
[]*Cookie{{Name: "special-2", Value: " z", Raw: `special-2=" z"`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-3="a "`}},
|
||||
[]*Cookie{{Name: "special-3", Value: "a ", Raw: `special-3="a "`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-4=" "`}},
|
||||
[]*Cookie{{Name: "special-4", Value: " ", Raw: `special-4=" "`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-5=a,z`}},
|
||||
[]*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-6=",z"`}},
|
||||
[]*Cookie{{Name: "special-6", Value: ",z", Raw: `special-6=",z"`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-7=a,`}},
|
||||
[]*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}},
|
||||
},
|
||||
{
|
||||
Header{"Set-Cookie": {`special-8=","`}},
|
||||
[]*Cookie{{Name: "special-8", Value: ",", Raw: `special-8=","`}},
|
||||
},
|
||||
|
||||
// TODO(bradfitz): users have reported seeing this in the
|
||||
// wild, but do browsers handle it? RFC 6265 just says "don't
|
||||
// do that" (section 3) and then never mentions header folding
|
||||
// again.
|
||||
// Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}},
|
||||
}
|
||||
|
||||
func toJSON(v interface{}) string {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("%#v", v)
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func TestReadSetCookies(t *testing.T) {
|
||||
for i, tt := range readSetCookiesTests {
|
||||
for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input
|
||||
c := readSetCookies(tt.Header)
|
||||
if !reflect.DeepEqual(c, tt.Cookies) {
|
||||
t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var readCookiesTests = []struct {
|
||||
Header Header
|
||||
Filter string
|
||||
Cookies []*Cookie
|
||||
}{
|
||||
{
|
||||
Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
|
||||
"",
|
||||
[]*Cookie{
|
||||
{Name: "Cookie-1", Value: "v$1"},
|
||||
{Name: "c2", Value: "v2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}},
|
||||
"c2",
|
||||
[]*Cookie{
|
||||
{Name: "c2", Value: "v2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
|
||||
"",
|
||||
[]*Cookie{
|
||||
{Name: "Cookie-1", Value: "v$1"},
|
||||
{Name: "c2", Value: "v2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Header{"Cookie": {"Cookie-1=v$1; c2=v2"}},
|
||||
"c2",
|
||||
[]*Cookie{
|
||||
{Name: "c2", Value: "v2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestReadCookies(t *testing.T) {
|
||||
for i, tt := range readCookiesTests {
|
||||
for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input
|
||||
c := readCookies(tt.Header, tt.Filter)
|
||||
if !reflect.DeepEqual(c, tt.Cookies) {
|
||||
t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCookieSanitizeValue(t *testing.T) {
|
||||
defer log.SetOutput(os.Stderr)
|
||||
var logbuf bytes.Buffer
|
||||
log.SetOutput(&logbuf)
|
||||
|
||||
tests := []struct {
|
||||
in, want string
|
||||
}{
|
||||
{"foo", "foo"},
|
||||
{"foo;bar", "foobar"},
|
||||
{"foo\\bar", "foobar"},
|
||||
{"foo\"bar", "foobar"},
|
||||
{"\x00\x7e\x7f\x80", "\x7e"},
|
||||
{`"withquotes"`, "withquotes"},
|
||||
{"a z", "a z"},
|
||||
{" z", `" z"`},
|
||||
{"a ", `"a "`},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := sanitizeCookieValue(tt.in); got != tt.want {
|
||||
t.Errorf("sanitizeCookieValue(%q) = %q; want %q", tt.in, got, tt.want)
|
||||
}
|
||||
}
|
||||
|
||||
if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) {
|
||||
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCookieSanitizePath(t *testing.T) {
|
||||
defer log.SetOutput(os.Stderr)
|
||||
var logbuf bytes.Buffer
|
||||
log.SetOutput(&logbuf)
|
||||
|
||||
tests := []struct {
|
||||
in, want string
|
||||
}{
|
||||
{"/path", "/path"},
|
||||
{"/path with space/", "/path with space/"},
|
||||
{"/just;no;semicolon\x00orstuff/", "/justnosemicolonorstuff/"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := sanitizeCookiePath(tt.in); got != tt.want {
|
||||
t.Errorf("sanitizeCookiePath(%q) = %q; want %q", tt.in, got, tt.want)
|
||||
}
|
||||
}
|
||||
|
||||
if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) {
|
||||
t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
161 vendor/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go (generated, vendored)
@ -1,161 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cookiejar
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var punycodeTestCases = [...]struct {
|
||||
s, encoded string
|
||||
}{
|
||||
{"", ""},
|
||||
{"-", "--"},
|
||||
{"-a", "-a-"},
|
||||
{"-a-", "-a--"},
|
||||
{"a", "a-"},
|
||||
{"a-", "a--"},
|
||||
{"a-b", "a-b-"},
|
||||
{"books", "books-"},
|
||||
{"bücher", "bcher-kva"},
|
||||
{"Hello世界", "Hello-ck1hg65u"},
|
||||
{"ü", "tda"},
|
||||
{"üý", "tdac"},
|
||||
|
||||
// The test cases below come from RFC 3492 section 7.1 with Errata 3026.
|
||||
{
|
||||
// (A) Arabic (Egyptian).
|
||||
"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" +
|
||||
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
|
||||
"egbpdaj6bu4bxfgehfvwxn",
|
||||
},
|
||||
{
|
||||
// (B) Chinese (simplified).
|
||||
"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
|
||||
"ihqwcrb4cv8a8dqg056pqjye",
|
||||
},
|
||||
{
|
||||
// (C) Chinese (traditional).
|
||||
"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
|
||||
"ihqwctvzc91f659drss3x8bo0yb",
|
||||
},
|
||||
{
|
||||
// (D) Czech.
|
||||
"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" +
|
||||
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" +
|
||||
"\u0065\u0073\u006B\u0079",
|
||||
"Proprostnemluvesky-uyb24dma41a",
|
||||
},
|
||||
{
|
||||
// (E) Hebrew.
|
||||
"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" +
|
||||
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" +
|
||||
"\u05D1\u05E8\u05D9\u05EA",
|
||||
"4dbcagdahymbxekheh6e0a7fei0b",
|
||||
},
|
||||
{
|
||||
// (F) Hindi (Devanagari).
|
||||
"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" +
|
||||
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" +
|
||||
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" +
|
||||
"\u0939\u0948\u0902",
|
||||
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd",
|
||||
},
|
||||
{
|
||||
// (G) Japanese (kanji and hiragana).
|
||||
"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" +
|
||||
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
|
||||
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa",
|
||||
},
|
||||
{
|
||||
// (H) Korean (Hangul syllables).
|
||||
"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" +
|
||||
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" +
|
||||
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
|
||||
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" +
|
||||
"psd879ccm6fea98c",
|
||||
},
|
||||
{
|
||||
// (I) Russian (Cyrillic).
|
||||
"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" +
|
||||
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" +
|
||||
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" +
|
||||
"\u0438",
|
||||
"b1abfaaepdrnnbgefbadotcwatmq2g4l",
|
||||
},
|
||||
{
|
||||
// (J) Spanish.
|
||||
"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" +
|
||||
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" +
|
||||
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" +
|
||||
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" +
|
||||
"\u0061\u00F1\u006F\u006C",
|
||||
"PorqunopuedensimplementehablarenEspaol-fmd56a",
|
||||
},
|
||||
{
|
||||
// (K) Vietnamese.
|
||||
"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" +
|
||||
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" +
|
||||
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" +
|
||||
"\u0056\u0069\u1EC7\u0074",
|
||||
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g",
|
||||
},
|
||||
{
|
||||
// (L) 3<nen>B<gumi><kinpachi><sensei>.
|
||||
"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
|
||||
"3B-ww4c5e180e575a65lsy2b",
|
||||
},
|
||||
{
|
||||
// (M) <amuro><namie>-with-SUPER-MONKEYS.
|
||||
"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" +
|
||||
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" +
|
||||
"\u004F\u004E\u004B\u0045\u0059\u0053",
|
||||
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n",
|
||||
},
|
||||
{
|
||||
// (N) Hello-Another-Way-<sorezore><no><basho>.
|
||||
"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" +
|
||||
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" +
|
||||
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
|
||||
"Hello-Another-Way--fc4qua05auwb3674vfr0b",
|
||||
},
|
||||
{
|
||||
// (O) <hitotsu><yane><no><shita>2.
|
||||
"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
|
||||
"2-u9tlzr9756bt3uc0v",
|
||||
},
|
||||
{
|
||||
// (P) Maji<de>Koi<suru>5<byou><mae>
|
||||
"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" +
|
||||
"\u308B\u0035\u79D2\u524D",
|
||||
"MajiKoi5-783gue6qz075azm5e",
|
||||
},
|
||||
{
|
||||
// (Q) <pafii>de<runba>
|
||||
"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
|
||||
"de-jg4avhby1noc0d",
|
||||
},
|
||||
{
|
||||
// (R) <sono><supiido><de>
|
||||
"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
|
||||
"d9juau41awczczp",
|
||||
},
|
||||
{
|
||||
// (S) -> $1.00 <-
|
||||
"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" +
|
||||
"\u003C\u002D",
|
||||
"-> $1.00 <--",
|
||||
},
|
||||
}
|
||||
|
||||
func TestPunycode(t *testing.T) {
|
||||
for _, tc := range punycodeTestCases {
|
||||
if got, err := encode("", tc.s); err != nil {
|
||||
t.Errorf(`encode("", %q): %v`, tc.s, err)
|
||||
} else if got != tc.encoded {
|
||||
t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,88 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http_test

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func ExampleHijacker() {
	http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) {
		hj, ok := w.(http.Hijacker)
		if !ok {
			http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
			return
		}
		conn, bufrw, err := hj.Hijack()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Don't forget to close the connection:
		defer conn.Close()
		bufrw.WriteString("Now we're speaking raw TCP. Say hi: ")
		bufrw.Flush()
		s, err := bufrw.ReadString('\n')
		if err != nil {
			log.Printf("error reading string: %v", err)
			return
		}
		fmt.Fprintf(bufrw, "You said: %q\nBye.\n", s)
		bufrw.Flush()
	})
}

func ExampleGet() {
	res, err := http.Get("http://www.google.com/robots.txt")
	if err != nil {
		log.Fatal(err)
	}
	robots, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", robots)
}

func ExampleFileServer() {
	// Simple static webserver:
	log.Fatal(http.ListenAndServe(":8080", http.FileServer(http.Dir("/usr/share/doc"))))
}

func ExampleFileServer_stripPrefix() {
	// To serve a directory on disk (/tmp) under an alternate URL
	// path (/tmpfiles/), use StripPrefix to modify the request
	// URL's path before the FileServer sees it:
	http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))
}

func ExampleStripPrefix() {
	// To serve a directory on disk (/tmp) under an alternate URL
	// path (/tmpfiles/), use StripPrefix to modify the request
	// URL's path before the FileServer sees it:
	http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))
}

type apiHandler struct{}

func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {}

func ExampleServeMux_Handle() {
	mux := http.NewServeMux()
	mux.Handle("/api/", apiHandler{})
	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		// The "/" pattern matches everything, so we need to check
		// that we're at the root here.
		if req.URL.Path != "/" {
			http.NotFound(w, req)
			return
		}
		fmt.Fprintf(w, "Welcome to the home page!")
	})
}
@ -1,72 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Bridge package to expose http internals to tests in the http_test
// package.

package http

import (
	"net"
	"time"
)

func NewLoggingConn(baseName string, c net.Conn) net.Conn {
	return newLoggingConn(baseName, c)
}

var ExportAppendTime = appendTime

func (t *Transport) NumPendingRequestsForTesting() int {
	t.reqMu.Lock()
	defer t.reqMu.Unlock()
	return len(t.reqCanceler)
}

func (t *Transport) IdleConnKeysForTesting() (keys []string) {
	keys = make([]string, 0)
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	if t.idleConn == nil {
		return
	}
	for key := range t.idleConn {
		keys = append(keys, key.String())
	}
	return
}

func (t *Transport) IdleConnCountForTesting(cacheKey string) int {
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	if t.idleConn == nil {
		return 0
	}
	for k, conns := range t.idleConn {
		if k.String() == cacheKey {
			return len(conns)
		}
	}
	return 0
}

func (t *Transport) IdleConnChMapSizeForTesting() int {
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	return len(t.idleConnCh)
}

func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler {
	f := func() <-chan time.Time {
		return ch
	}
	return &timeoutHandler{handler, f, ""}
}

func ResetCachedEnvironment() {
	httpProxyEnv.reset()
	noProxyEnv.reset()
}

var DefaultUserAgent = defaultUserAgent
@ -1,150 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fcgi

import (
	"bytes"
	"errors"
	"io"
	"testing"
)

var sizeTests = []struct {
	size  uint32
	bytes []byte
}{
	{0, []byte{0x00}},
	{127, []byte{0x7F}},
	{128, []byte{0x80, 0x00, 0x00, 0x80}},
	{1000, []byte{0x80, 0x00, 0x03, 0xE8}},
	{33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}},
}

func TestSize(t *testing.T) {
	b := make([]byte, 4)
	for i, test := range sizeTests {
		n := encodeSize(b, test.size)
		if !bytes.Equal(b[:n], test.bytes) {
			t.Errorf("%d expected %x, encoded %x", i, test.bytes, b)
		}
		size, n := readSize(test.bytes)
		if size != test.size {
			t.Errorf("%d expected %d, read %d", i, test.size, size)
		}
		if len(test.bytes) != n {
			t.Errorf("%d did not consume all the bytes", i)
		}
	}
}

var streamTests = []struct {
	desc    string
	recType recType
	reqId   uint16
	content []byte
	raw     []byte
}{
	{"single record", typeStdout, 1, nil,
		[]byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0},
	},
	// this data will have to be split into two records
	{"two records", typeStdin, 300, make([]byte, 66000),
		bytes.Join([][]byte{
			// header for the first record
			{1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0},
			make([]byte, 65536),
			// header for the second
			{1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0},
			make([]byte, 472),
			// header for the empty record
			{1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0},
		},
			nil),
	},
}

type nilCloser struct {
	io.ReadWriter
}

func (c *nilCloser) Close() error { return nil }

func TestStreams(t *testing.T) {
	var rec record
outer:
	for _, test := range streamTests {
		buf := bytes.NewBuffer(test.raw)
		var content []byte
		for buf.Len() > 0 {
			if err := rec.read(buf); err != nil {
				t.Errorf("%s: error reading record: %v", test.desc, err)
				continue outer
			}
			content = append(content, rec.content()...)
		}
		if rec.h.Type != test.recType {
			t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType)
			continue
		}
		if rec.h.Id != test.reqId {
			t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId)
			continue
		}
		if !bytes.Equal(content, test.content) {
			t.Errorf("%s: read wrong content", test.desc)
			continue
		}
		buf.Reset()
		c := newConn(&nilCloser{buf})
		w := newWriter(c, test.recType, test.reqId)
		if _, err := w.Write(test.content); err != nil {
			t.Errorf("%s: error writing record: %v", test.desc, err)
			continue
		}
		if err := w.Close(); err != nil {
			t.Errorf("%s: error closing stream: %v", test.desc, err)
			continue
		}
		if !bytes.Equal(buf.Bytes(), test.raw) {
			t.Errorf("%s: wrote wrong content", test.desc)
		}
	}
}

type writeOnlyConn struct {
	buf []byte
}

func (c *writeOnlyConn) Write(p []byte) (int, error) {
	c.buf = append(c.buf, p...)
	return len(p), nil
}

func (c *writeOnlyConn) Read(p []byte) (int, error) {
	return 0, errors.New("conn is write-only")
}

func (c *writeOnlyConn) Close() error {
	return nil
}

func TestGetValues(t *testing.T) {
	var rec record
	rec.h.Type = typeGetValues

	wc := new(writeOnlyConn)
	c := newChild(wc, nil)
	err := c.handleRecord(&rec)
	if err != nil {
		t.Fatalf("handleRecord: %v", err)
	}

	const want = "\x01\n\x00\x00\x00\x12\x06\x00" +
		"\x0f\x01FCGI_MPXS_CONNS1" +
		"\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00"
	if got := string(wc.buf); got != want {
		t.Errorf(" got: %q\nwant: %q\n", got, want)
	}
}
@ -1,65 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checker(t *testing.T) func(string, error) {
|
||||
return func(call string, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
t.Fatalf("%s: %v", call, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileTransport(t *testing.T) {
|
||||
check := checker(t)
|
||||
|
||||
dname, err := ioutil.TempDir("", "")
|
||||
check("TempDir", err)
|
||||
fname := filepath.Join(dname, "foo.txt")
|
||||
err = ioutil.WriteFile(fname, []byte("Bar"), 0644)
|
||||
check("WriteFile", err)
|
||||
defer os.Remove(dname)
|
||||
defer os.Remove(fname)
|
||||
|
||||
tr := &Transport{}
|
||||
tr.RegisterProtocol("file", NewFileTransport(Dir(dname)))
|
||||
c := &Client{Transport: tr}
|
||||
|
||||
fooURLs := []string{"file:///foo.txt", "file://../foo.txt"}
|
||||
for _, urlstr := range fooURLs {
|
||||
res, err := c.Get(urlstr)
|
||||
check("Get "+urlstr, err)
|
||||
if res.StatusCode != 200 {
|
||||
t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode)
|
||||
}
|
||||
if res.ContentLength != -1 {
|
||||
t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength)
|
||||
}
|
||||
if res.Body == nil {
|
||||
t.Fatalf("for %s, nil Body", urlstr)
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(res.Body)
|
||||
check("ReadAll "+urlstr, err)
|
||||
if string(slurp) != "Bar" {
|
||||
t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
|
||||
}
|
||||
}
|
||||
|
||||
const badURL = "file://../no-exist.txt"
|
||||
res, err := c.Get(badURL)
|
||||
check("Get "+badURL, err)
|
||||
if res.StatusCode != 404 {
|
||||
t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode)
|
||||
}
|
||||
res.Body.Close()
|
||||
}
|
|
@ -1,858 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net"
|
||||
. "net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
testFile = "testdata/file"
|
||||
testFileLen = 11
|
||||
)
|
||||
|
||||
type wantRange struct {
|
||||
start, end int64 // range [start,end)
|
||||
}
|
||||
|
||||
var itoa = strconv.Itoa
|
||||
|
||||
var ServeFileRangeTests = []struct {
|
||||
r string
|
||||
code int
|
||||
ranges []wantRange
|
||||
}{
|
||||
{r: "", code: StatusOK},
|
||||
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
|
||||
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
|
||||
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
|
||||
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
|
||||
{r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
|
||||
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
|
||||
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
|
||||
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
|
||||
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
|
||||
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
|
||||
{r: "bytes=0-" + itoa(testFileLen-2), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
|
||||
{r: "bytes=0-" + itoa(testFileLen-1), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
|
||||
{r: "bytes=0-" + itoa(testFileLen), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
|
||||
}
|
||||
|
||||
func TestServeFile(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
ServeFile(w, r, "testdata/file")
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
var err error
|
||||
|
||||
file, err := ioutil.ReadFile(testFile)
|
||||
if err != nil {
|
||||
t.Fatal("reading file:", err)
|
||||
}
|
||||
|
||||
// set up the Request (re-used for all tests)
|
||||
var req Request
|
||||
req.Header = make(Header)
|
||||
if req.URL, err = url.Parse(ts.URL); err != nil {
|
||||
t.Fatal("ParseURL:", err)
|
||||
}
|
||||
req.Method = "GET"
|
||||
|
||||
// straight GET
|
||||
_, body := getBody(t, "straight get", req)
|
||||
if !bytes.Equal(body, file) {
|
||||
t.Fatalf("body mismatch: got %q, want %q", body, file)
|
||||
}
|
||||
|
||||
// Range tests
|
||||
Cases:
|
||||
for _, rt := range ServeFileRangeTests {
|
||||
if rt.r != "" {
|
||||
req.Header.Set("Range", rt.r)
|
||||
}
|
||||
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
|
||||
if resp.StatusCode != rt.code {
|
||||
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
|
||||
}
|
||||
if rt.code == StatusRequestedRangeNotSatisfiable {
|
||||
continue
|
||||
}
|
||||
wantContentRange := ""
|
||||
if len(rt.ranges) == 1 {
|
||||
rng := rt.ranges[0]
|
||||
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
|
||||
}
|
||||
cr := resp.Header.Get("Content-Range")
|
||||
if cr != wantContentRange {
|
||||
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
|
||||
}
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
if len(rt.ranges) == 1 {
|
||||
rng := rt.ranges[0]
|
||||
wantBody := file[rng.start:rng.end]
|
||||
if !bytes.Equal(body, wantBody) {
|
||||
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
|
||||
}
|
||||
if strings.HasPrefix(ct, "multipart/byteranges") {
|
||||
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
|
||||
}
|
||||
}
|
||||
if len(rt.ranges) > 1 {
|
||||
typ, params, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
|
||||
continue
|
||||
}
|
||||
if typ != "multipart/byteranges" {
|
||||
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
|
||||
continue
|
||||
}
|
||||
if params["boundary"] == "" {
|
||||
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
|
||||
continue
|
||||
}
|
||||
if g, w := resp.ContentLength, int64(len(body)); g != w {
|
||||
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
|
||||
continue
|
||||
}
|
||||
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
|
||||
for ri, rng := range rt.ranges {
|
||||
part, err := mr.NextPart()
|
||||
if err != nil {
|
||||
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
|
||||
continue Cases
|
||||
}
|
||||
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
|
||||
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
|
||||
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
|
||||
}
|
||||
body, err := ioutil.ReadAll(part)
|
||||
if err != nil {
|
||||
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
|
||||
continue Cases
|
||||
}
|
||||
wantBody := file[rng.start:rng.end]
|
||||
if !bytes.Equal(body, wantBody) {
|
||||
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
|
||||
}
|
||||
}
|
||||
_, err = mr.NextPart()
|
||||
if err != io.EOF {
|
||||
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var fsRedirectTestData = []struct {
|
||||
original, redirect string
|
||||
}{
|
||||
{"/test/index.html", "/test/"},
|
||||
{"/test/testdata", "/test/testdata/"},
|
||||
{"/test/testdata/file/", "/test/testdata/file"},
|
||||
}
|
||||
|
||||
func TestFSRedirect(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
|
||||
defer ts.Close()
|
||||
|
||||
for _, data := range fsRedirectTestData {
|
||||
res, err := Get(ts.URL + data.original)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res.Body.Close()
|
||||
if g, e := res.Request.URL.Path, data.redirect; g != e {
|
||||
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testFileSystem struct {
|
||||
open func(name string) (File, error)
|
||||
}
|
||||
|
||||
func (fs *testFileSystem) Open(name string) (File, error) {
|
||||
return fs.open(name)
|
||||
}
|
||||
|
||||
func TestFileServerCleans(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ch := make(chan string, 1)
|
||||
fs := FileServer(&testFileSystem{func(name string) (File, error) {
|
||||
ch <- name
|
||||
return nil, errors.New("file does not exist")
|
||||
}})
|
||||
tests := []struct {
|
||||
reqPath, openArg string
|
||||
}{
|
||||
{"/foo.txt", "/foo.txt"},
|
||||
{"//foo.txt", "/foo.txt"},
|
||||
{"/../foo.txt", "/foo.txt"},
|
||||
}
|
||||
req, _ := NewRequest("GET", "http://example.com", nil)
|
||||
for n, test := range tests {
|
||||
rec := httptest.NewRecorder()
|
||||
req.URL.Path = test.reqPath
|
||||
fs.ServeHTTP(rec, req)
|
||||
if got := <-ch; got != test.openArg {
|
||||
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileServerEscapesNames(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
const dirListPrefix = "<pre>\n"
|
||||
const dirListSuffix = "\n</pre>\n"
|
||||
tests := []struct {
|
||||
name, escaped string
|
||||
}{
|
||||
{`simple_name`, `<a href="simple_name">simple_name</a>`},
|
||||
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
|
||||
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
|
||||
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
|
||||
}
|
||||
|
||||
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
|
||||
fs := make(fakeFS)
|
||||
for i, test := range tests {
|
||||
testFile := &fakeFileInfo{basename: test.name}
|
||||
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
|
||||
dir: true,
|
||||
modtime: time.Unix(1000000000, 0).UTC(),
|
||||
ents: []*fakeFileInfo{testFile},
|
||||
}
|
||||
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
|
||||
}
|
||||
|
||||
ts := httptest.NewServer(FileServer(&fs))
|
||||
defer ts.Close()
|
||||
for i, test := range tests {
|
||||
url := fmt.Sprintf("%s/%d", ts.URL, i)
|
||||
res, err := Get(url)
|
||||
if err != nil {
|
||||
t.Fatalf("test %q: Get: %v", test.name, err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("test %q: read Body: %v", test.name, err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
|
||||
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
|
||||
}
|
||||
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
|
||||
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
|
||||
}
|
||||
res.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func mustRemoveAll(dir string) {
|
||||
err := os.RemoveAll(dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileServerImplicitLeadingSlash(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
tempDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("TempDir: %v", err)
|
||||
}
|
||||
defer mustRemoveAll(tempDir)
|
||||
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
|
||||
t.Fatalf("WriteFile: %v", err)
|
||||
}
|
||||
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
|
||||
defer ts.Close()
|
||||
get := func(suffix string) string {
|
||||
res, err := Get(ts.URL + suffix)
|
||||
if err != nil {
|
||||
t.Fatalf("Get %s: %v", suffix, err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll %s: %v", suffix, err)
|
||||
}
|
||||
res.Body.Close()
|
||||
return string(b)
|
||||
}
|
||||
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
|
||||
t.Logf("expected a directory listing with foo.txt, got %q", s)
|
||||
}
|
||||
if s := get("/bar/foo.txt"); s != "Hello world" {
|
||||
t.Logf("expected %q, got %q", "Hello world", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirJoin(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("skipping test on windows")
|
||||
}
|
||||
wfi, err := os.Stat("/etc/hosts")
|
||||
if err != nil {
|
||||
t.Skip("skipping test; no /etc/hosts file")
|
||||
}
|
||||
test := func(d Dir, name string) {
|
||||
f, err := d.Open(name)
|
||||
if err != nil {
|
||||
t.Fatalf("open of %s: %v", name, err)
|
||||
}
|
||||
defer f.Close()
|
||||
gfi, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatalf("stat of %s: %v", name, err)
|
||||
}
|
||||
if !os.SameFile(gfi, wfi) {
|
||||
t.Errorf("%s got different file", name)
|
||||
}
|
||||
}
|
||||
test(Dir("/etc/"), "/hosts")
|
||||
test(Dir("/etc/"), "hosts")
|
||||
test(Dir("/etc/"), "../../../../hosts")
|
||||
test(Dir("/etc"), "/hosts")
|
||||
test(Dir("/etc"), "hosts")
|
||||
test(Dir("/etc"), "../../../../hosts")
|
||||
|
||||
// Not really directories, but since we use this trick in
|
||||
// ServeFile, test it:
|
||||
test(Dir("/etc/hosts"), "")
|
||||
test(Dir("/etc/hosts"), "/")
|
||||
test(Dir("/etc/hosts"), "../")
|
||||
}
|
||||
|
||||
func TestEmptyDirOpenCWD(t *testing.T) {
|
||||
test := func(d Dir) {
|
||||
name := "fs_test.go"
|
||||
f, err := d.Open(name)
|
||||
if err != nil {
|
||||
t.Fatalf("open of %s: %v", name, err)
|
||||
}
|
||||
defer f.Close()
|
||||
}
|
||||
test(Dir(""))
|
||||
test(Dir("."))
|
||||
test(Dir("./"))
|
||||
}
|
||||
|
||||
func TestServeFileContentType(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
const ctype = "icecream/chocolate"
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
switch r.FormValue("override") {
|
||||
case "1":
|
||||
w.Header().Set("Content-Type", ctype)
|
||||
case "2":
|
||||
// Explicitly inhibit sniffing.
|
||||
w.Header()["Content-Type"] = []string{}
|
||||
}
|
||||
ServeFile(w, r, "testdata/file")
|
||||
}))
|
||||
defer ts.Close()
|
||||
get := func(override string, want []string) {
|
||||
resp, err := Get(ts.URL + "?override=" + override)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
|
||||
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
get("0", []string{"text/plain; charset=utf-8"})
|
||||
get("1", []string{ctype})
|
||||
get("2", nil)
|
||||
}
|
||||
|
||||
func TestServeFileMimeType(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
ServeFile(w, r, "testdata/style.css")
|
||||
}))
|
||||
defer ts.Close()
|
||||
resp, err := Get(ts.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
want := "text/css; charset=utf-8"
|
||||
if h := resp.Header.Get("Content-Type"); h != want {
|
||||
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServeFileFromCWD(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
ServeFile(w, r, "fs_test.go")
|
||||
}))
|
||||
defer ts.Close()
|
||||
r, err := Get(ts.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r.Body.Close()
|
||||
if r.StatusCode != 200 {
|
||||
t.Fatalf("expected 200 OK, got %s", r.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServeFileWithContentEncoding(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
w.Header().Set("Content-Encoding", "foo")
|
||||
ServeFile(w, r, "testdata/file")
|
||||
}))
|
||||
defer ts.Close()
|
||||
resp, err := Get(ts.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if g, e := resp.ContentLength, int64(-1); g != e {
|
||||
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServeIndexHtml(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
const want = "index.html says hello\n"
|
||||
ts := httptest.NewServer(FileServer(Dir(".")))
|
||||
defer ts.Close()
|
||||
|
||||
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
|
||||
res, err := Get(ts.URL + path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatal("reading Body:", err)
|
||||
}
|
||||
if s := string(b); s != want {
|
||||
t.Errorf("for path %q got %q, want %q", path, s, want)
|
||||
}
|
||||
res.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileServerZeroByte(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
ts := httptest.NewServer(FileServer(Dir(".")))
|
||||
defer ts.Close()
|
||||
|
||||
res, err := Get(ts.URL + "/..\x00")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatal("reading Body:", err)
|
||||
}
|
||||
if res.StatusCode == 200 {
|
||||
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
|
||||
}
|
||||
}
|
||||
|
||||
type fakeFileInfo struct {
|
||||
dir bool
|
||||
basename string
|
||||
modtime time.Time
|
||||
ents []*fakeFileInfo
|
||||
contents string
|
||||
}
|
||||
|
||||
func (f *fakeFileInfo) Name() string { return f.basename }
|
||||
func (f *fakeFileInfo) Sys() interface{} { return nil }
|
||||
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
|
||||
func (f *fakeFileInfo) IsDir() bool { return f.dir }
|
||||
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
|
||||
func (f *fakeFileInfo) Mode() os.FileMode {
|
||||
if f.dir {
|
||||
return 0755 | os.ModeDir
|
||||
}
|
||||
return 0644
|
||||
}
|
||||
|
||||
type fakeFile struct {
|
||||
io.ReadSeeker
|
||||
fi *fakeFileInfo
|
||||
path string // as opened
|
||||
entpos int
|
||||
}
|
||||
|
||||
func (f *fakeFile) Close() error { return nil }
|
||||
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
|
||||
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if !f.fi.dir {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
var fis []os.FileInfo
|
||||
|
||||
limit := f.entpos + count
|
||||
if count <= 0 || limit > len(f.fi.ents) {
|
||||
limit = len(f.fi.ents)
|
||||
}
|
||||
for ; f.entpos < limit; f.entpos++ {
|
||||
fis = append(fis, f.fi.ents[f.entpos])
|
||||
}
|
||||
|
||||
if len(fis) == 0 && count > 0 {
|
||||
return fis, io.EOF
|
||||
} else {
|
||||
return fis, nil
|
||||
}
|
||||
}
|
||||
|
||||
type fakeFS map[string]*fakeFileInfo
|
||||
|
||||
func (fs fakeFS) Open(name string) (File, error) {
|
||||
name = path.Clean(name)
|
||||
f, ok := fs[name]
|
||||
if !ok {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
|
||||
}
|
||||
|
||||
func TestDirectoryIfNotModified(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
const indexContents = "I am a fake index.html file"
|
||||
fileMod := time.Unix(1000000000, 0).UTC()
|
||||
fileModStr := fileMod.Format(TimeFormat)
|
||||
dirMod := time.Unix(123, 0).UTC()
|
||||
indexFile := &fakeFileInfo{
|
||||
basename: "index.html",
|
||||
modtime: fileMod,
|
||||
contents: indexContents,
|
||||
}
|
||||
fs := fakeFS{
|
||||
"/": &fakeFileInfo{
|
||||
dir: true,
|
||||
modtime: dirMod,
|
||||
ents: []*fakeFileInfo{indexFile},
|
||||
},
|
||||
"/index.html": indexFile,
|
||||
}
|
||||
|
||||
ts := httptest.NewServer(FileServer(fs))
|
||||
defer ts.Close()
|
||||
|
||||
res, err := Get(ts.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != indexContents {
|
||||
t.Fatalf("Got body %q; want %q", b, indexContents)
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
lastMod := res.Header.Get("Last-Modified")
|
||||
if lastMod != fileModStr {
|
||||
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
|
||||
}
|
||||
|
||||
req, _ := NewRequest("GET", ts.URL, nil)
|
||||
req.Header.Set("If-Modified-Since", lastMod)
|
||||
|
||||
res, err = DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 304 {
|
||||
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
// Advance the index.html file's modtime, but not the directory's.
|
||||
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
|
||||
|
||||
res, err = DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
|
||||
}
|
||||
res.Body.Close()
|
||||
}
|
||||
|
||||
func mustStat(t *testing.T, fileName string) os.FileInfo {
|
||||
fi, err := os.Stat(fileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
func TestServeContent(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
type serveParam struct {
|
||||
name string
|
||||
modtime time.Time
|
||||
content io.ReadSeeker
|
||||
contentType string
|
||||
etag string
|
||||
}
|
||||
servec := make(chan serveParam, 1)
|
||||
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
|
||||
p := <-servec
|
||||
if p.etag != "" {
|
||||
w.Header().Set("ETag", p.etag)
|
||||
}
|
||||
if p.contentType != "" {
|
||||
w.Header().Set("Content-Type", p.contentType)
|
||||
}
|
||||
ServeContent(w, r, p.name, p.modtime, p.content)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
type testCase struct {
|
||||
// One of file or content must be set:
|
||||
file string
|
||||
content io.ReadSeeker
|
||||
|
||||
modtime time.Time
|
||||
serveETag string // optional
|
||||
serveContentType string // optional
|
||||
reqHeader map[string]string
|
||||
wantLastMod string
|
||||
wantContentType string
|
||||
wantStatus int
|
||||
}
|
||||
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
|
||||
tests := map[string]testCase{
|
||||
"no_last_modified": {
|
||||
file: "testdata/style.css",
|
||||
wantContentType: "text/css; charset=utf-8",
|
||||
wantStatus: 200,
|
||||
},
|
||||
"with_last_modified": {
|
||||
file: "testdata/index.html",
|
||||
wantContentType: "text/html; charset=utf-8",
|
||||
modtime: htmlModTime,
|
||||
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
|
||||
wantStatus: 200,
|
||||
},
|
||||
"not_modified_modtime": {
|
||||
file: "testdata/style.css",
|
||||
modtime: htmlModTime,
|
||||
reqHeader: map[string]string{
|
||||
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
|
||||
},
|
||||
wantStatus: 304,
|
||||
},
|
||||
"not_modified_modtime_with_contenttype": {
|
||||
file: "testdata/style.css",
|
||||
serveContentType: "text/css", // explicit content type
|
||||
modtime: htmlModTime,
|
||||
reqHeader: map[string]string{
|
||||
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
|
||||
},
|
||||
wantStatus: 304,
|
||||
},
|
||||
"not_modified_etag": {
|
||||
file: "testdata/style.css",
|
||||
serveETag: `"foo"`,
|
||||
reqHeader: map[string]string{
|
||||
"If-None-Match": `"foo"`,
|
||||
},
|
||||
wantStatus: 304,
|
||||
},
|
||||
"not_modified_etag_no_seek": {
|
||||
content: panicOnSeek{nil}, // should never be called
|
||||
serveETag: `"foo"`,
|
||||
reqHeader: map[string]string{
|
||||
"If-None-Match": `"foo"`,
|
||||
},
|
||||
wantStatus: 304,
|
||||
},
|
||||
"range_good": {
|
||||
file: "testdata/style.css",
|
||||
serveETag: `"A"`,
|
||||
reqHeader: map[string]string{
|
||||
"Range": "bytes=0-4",
|
||||
},
|
||||
wantStatus: StatusPartialContent,
|
||||
wantContentType: "text/css; charset=utf-8",
|
||||
},
|
||||
// An If-Range resource for entity "A", but entity "B" is now current.
|
||||
// The Range request should be ignored.
|
||||
"range_no_match": {
|
||||
file: "testdata/style.css",
|
||||
serveETag: `"A"`,
|
||||
reqHeader: map[string]string{
|
||||
"Range": "bytes=0-4",
|
||||
"If-Range": `"B"`,
|
||||
},
|
||||
wantStatus: 200,
|
||||
wantContentType: "text/css; charset=utf-8",
|
||||
},
|
||||
}
|
||||
for testName, tt := range tests {
|
||||
var content io.ReadSeeker
|
||||
if tt.file != "" {
|
||||
f, err := os.Open(tt.file)
|
||||
if err != nil {
|
||||
t.Fatalf("test %q: %v", testName, err)
|
||||
}
|
||||
defer f.Close()
|
||||
content = f
|
||||
} else {
|
||||
content = tt.content
|
||||
}
|
||||
|
||||
servec <- serveParam{
|
||||
name: filepath.Base(tt.file),
|
||||
content: content,
|
||||
modtime: tt.modtime,
|
||||
etag: tt.serveETag,
|
||||
contentType: tt.serveContentType,
|
||||
}
|
||||
req, err := NewRequest("GET", ts.URL, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for k, v := range tt.reqHeader {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
res, err := DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
io.Copy(ioutil.Discard, res.Body)
|
||||
res.Body.Close()
|
||||
if res.StatusCode != tt.wantStatus {
|
||||
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
|
||||
}
|
||||
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
|
||||
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
|
||||
}
|
||||
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
|
||||
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// verifies that sendfile is being used on Linux
|
||||
func TestLinuxSendfile(t *testing.T) {
|
||||
defer afterTest(t)
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("skipping; linux-only test")
|
||||
}
|
||||
if _, err := exec.LookPath("strace"); err != nil {
|
||||
t.Skip("skipping; strace not found in path")
|
||||
}
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lnf, err := ln.(*net.TCPListener).File()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer ln.Close()
|
||||
|
||||
var buf bytes.Buffer
|
||||
child := exec.Command("strace", "-f", "-q", "-e", "trace=sendfile,sendfile64", os.Args[0], "-test.run=TestLinuxSendfileChild")
|
||||
child.ExtraFiles = append(child.ExtraFiles, lnf)
|
||||
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
|
||||
child.Stdout = &buf
|
||||
child.Stderr = &buf
|
||||
if err := child.Start(); err != nil {
|
||||
t.Skipf("skipping; failed to start straced child: %v", err)
|
||||
}
|
||||
|
||||
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
|
||||
if err != nil {
|
||||
t.Fatalf("http client error: %v", err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, res.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("client body read error: %v", err)
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
// Force child to exit cleanly.
|
||||
Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
|
||||
child.Wait()
|
||||
|
||||
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
|
||||
rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
|
||||
out := buf.String()
|
||||
if !rx.MatchString(out) && !rxResume.MatchString(out) {
|
||||
t.Errorf("no sendfile system call found in:\n%s", out)
|
||||
}
|
||||
}
|
||||
|
||||
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
|
||||
r, err := DefaultClient.Do(&req)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
|
||||
}
|
||||
return r, b
|
||||
}
|
||||
|
||||
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
|
||||
// for TestLinuxSendfile.
|
||||
func TestLinuxSendfileChild(*testing.T) {
|
||||
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
|
||||
return
|
||||
}
|
||||
defer os.Exit(0)
|
||||
fd3 := os.NewFile(3, "ephemeral-port-listener")
|
||||
ln, err := net.FileListener(fd3)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mux := NewServeMux()
|
||||
mux.Handle("/", FileServer(Dir("testdata")))
|
||||
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
|
||||
os.Exit(0)
|
||||
})
|
||||
s := &Server{Handler: mux}
|
||||
err = s.Serve(ln)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type panicOnSeek struct{ io.ReadSeeker }
|
|
@ -1,212 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var headerWriteTests = []struct {
|
||||
h Header
|
||||
exclude map[string]bool
|
||||
expected string
|
||||
}{
|
||||
{Header{}, nil, ""},
|
||||
{
|
||||
Header{
|
||||
"Content-Type": {"text/html; charset=UTF-8"},
|
||||
"Content-Length": {"0"},
|
||||
},
|
||||
nil,
|
||||
"Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n",
|
||||
},
|
||||
{
|
||||
Header{
|
||||
"Content-Length": {"0", "1", "2"},
|
||||
},
|
||||
nil,
|
||||
"Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n",
|
||||
},
|
||||
{
|
||||
Header{
|
||||
"Expires": {"-1"},
|
||||
"Content-Length": {"0"},
|
||||
"Content-Encoding": {"gzip"},
|
||||
},
|
||||
map[string]bool{"Content-Length": true},
|
||||
"Content-Encoding: gzip\r\nExpires: -1\r\n",
|
||||
},
|
||||
{
|
||||
Header{
|
||||
"Expires": {"-1"},
|
||||
"Content-Length": {"0", "1", "2"},
|
||||
"Content-Encoding": {"gzip"},
|
||||
},
|
||||
map[string]bool{"Content-Length": true},
|
||||
"Content-Encoding: gzip\r\nExpires: -1\r\n",
|
||||
},
|
||||
{
|
||||
Header{
|
||||
"Expires": {"-1"},
|
||||
"Content-Length": {"0"},
|
||||
"Content-Encoding": {"gzip"},
|
||||
},
|
||||
map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Header{
|
||||
"Nil": nil,
|
||||
"Empty": {},
|
||||
"Blank": {""},
|
||||
"Double-Blank": {"", ""},
|
||||
},
|
||||
nil,
|
||||
"Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n",
|
||||
},
|
||||
// Tests header sorting when over the insertion sort threshold size:
|
||||
{
|
||||
Header{
|
||||
"k1": {"1a", "1b"},
|
||||
"k2": {"2a", "2b"},
|
||||
"k3": {"3a", "3b"},
|
||||
"k4": {"4a", "4b"},
|
||||
"k5": {"5a", "5b"},
|
||||
"k6": {"6a", "6b"},
|
||||
"k7": {"7a", "7b"},
|
||||
"k8": {"8a", "8b"},
|
||||
"k9": {"9a", "9b"},
|
||||
},
|
||||
map[string]bool{"k5": true},
|
||||
"k1: 1a\r\nk1: 1b\r\nk2: 2a\r\nk2: 2b\r\nk3: 3a\r\nk3: 3b\r\n" +
|
||||
"k4: 4a\r\nk4: 4b\r\nk6: 6a\r\nk6: 6b\r\n" +
|
||||
"k7: 7a\r\nk7: 7b\r\nk8: 8a\r\nk8: 8b\r\nk9: 9a\r\nk9: 9b\r\n",
|
||||
},
|
||||
}
|
||||
|
||||
func TestHeaderWrite(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
for i, test := range headerWriteTests {
|
||||
test.h.WriteSubset(&buf, test.exclude)
|
||||
if buf.String() != test.expected {
|
||||
t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected)
|
||||
}
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
var parseTimeTests = []struct {
|
||||
h Header
|
||||
err bool
|
||||
}{
|
||||
{Header{"Date": {""}}, true},
|
||||
{Header{"Date": {"invalid"}}, true},
|
||||
{Header{"Date": {"1994-11-06T08:49:37Z00:00"}}, true},
|
||||
{Header{"Date": {"Sun, 06 Nov 1994 08:49:37 GMT"}}, false},
|
||||
{Header{"Date": {"Sunday, 06-Nov-94 08:49:37 GMT"}}, false},
|
||||
{Header{"Date": {"Sun Nov 6 08:49:37 1994"}}, false},
|
||||
}
|
||||
|
||||
func TestParseTime(t *testing.T) {
|
||||
expect := time.Date(1994, 11, 6, 8, 49, 37, 0, time.UTC)
|
||||
for i, test := range parseTimeTests {
|
||||
d, err := ParseTime(test.h.Get("Date"))
|
||||
if err != nil {
|
||||
if !test.err {
|
||||
t.Errorf("#%d:\n got err: %v", i, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if test.err {
|
||||
t.Errorf("#%d:\n should err", i)
|
||||
continue
|
||||
}
|
||||
if !expect.Equal(d) {
|
||||
t.Errorf("#%d:\n got: %v\nwant: %v", i, d, expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type hasTokenTest struct {
|
||||
header string
|
||||
token string
|
||||
want bool
|
||||
}
|
||||
|
||||
var hasTokenTests = []hasTokenTest{
|
||||
{"", "", false},
|
||||
{"", "foo", false},
|
||||
{"foo", "foo", true},
|
||||
{"foo ", "foo", true},
|
||||
{" foo", "foo", true},
|
||||
{" foo ", "foo", true},
|
||||
{"foo,bar", "foo", true},
|
||||
{"bar,foo", "foo", true},
|
||||
{"bar, foo", "foo", true},
|
||||
{"bar,foo, baz", "foo", true},
|
||||
{"bar, foo,baz", "foo", true},
|
||||
{"bar,foo, baz", "foo", true},
|
||||
{"bar, foo, baz", "foo", true},
|
||||
{"FOO", "foo", true},
|
||||
{"FOO ", "foo", true},
|
||||
{" FOO", "foo", true},
|
||||
{" FOO ", "foo", true},
|
||||
{"FOO,BAR", "foo", true},
|
||||
{"BAR,FOO", "foo", true},
|
||||
{"BAR, FOO", "foo", true},
|
||||
{"BAR,FOO, baz", "foo", true},
|
||||
{"BAR, FOO,BAZ", "foo", true},
|
||||
{"BAR,FOO, BAZ", "foo", true},
|
||||
{"BAR, FOO, BAZ", "foo", true},
|
||||
{"foobar", "foo", false},
|
||||
{"barfoo ", "foo", false},
|
||||
}
|
||||
|
||||
func TestHasToken(t *testing.T) {
|
||||
for _, tt := range hasTokenTests {
|
||||
if hasToken(tt.header, tt.token) != tt.want {
|
||||
t.Errorf("hasToken(%q, %q) = %v; want %v", tt.header, tt.token, !tt.want, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testHeader = Header{
|
||||
"Content-Length": {"123"},
|
||||
"Content-Type": {"text/plain"},
|
||||
"Date": {"some date at some time Z"},
|
||||
"Server": {DefaultUserAgent},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
func BenchmarkHeaderWriteSubset(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
testHeader.WriteSubset(&buf, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderWriteSubsetAllocs(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping alloc test in short mode")
|
||||
}
|
||||
if raceEnabled {
|
||||
t.Skip("skipping test under race detector")
|
||||
}
|
||||
if runtime.GOMAXPROCS(0) > 1 {
|
||||
t.Skip("skipping; GOMAXPROCS>1")
|
||||
}
|
||||
n := testing.AllocsPerRun(100, func() {
|
||||
buf.Reset()
|
||||
testHeader.WriteSubset(&buf, nil)
|
||||
})
|
||||
if n > 0 {
|
||||
t.Errorf("allocs = %g; want 0", n)
|
||||
}
|
||||
}
|
|
@ -1,50 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httptest_test

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
)

func ExampleResponseRecorder() {
	handler := func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "something failed", http.StatusInternalServerError)
	}

	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
	if err != nil {
		log.Fatal(err)
	}

	w := httptest.NewRecorder()
	handler(w, req)

	fmt.Printf("%d - %s", w.Code, w.Body.String())
	// Output: 500 - something failed
}

func ExampleServer() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello, client")
	}))
	defer ts.Close()

	res, err := http.Get(ts.URL)
	if err != nil {
		log.Fatal(err)
	}
	greeting, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%s", greeting)
	// Output: Hello, client
}
|
|
@ -1,90 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httptest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRecorder(t *testing.T) {
|
||||
type checkFunc func(*ResponseRecorder) error
|
||||
check := func(fns ...checkFunc) []checkFunc { return fns }
|
||||
|
||||
hasStatus := func(wantCode int) checkFunc {
|
||||
return func(rec *ResponseRecorder) error {
|
||||
if rec.Code != wantCode {
|
||||
return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
hasContents := func(want string) checkFunc {
|
||||
return func(rec *ResponseRecorder) error {
|
||||
if rec.Body.String() != want {
|
||||
return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
hasFlush := func(want bool) checkFunc {
|
||||
return func(rec *ResponseRecorder) error {
|
||||
if rec.Flushed != want {
|
||||
return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
h func(w http.ResponseWriter, r *http.Request)
|
||||
checks []checkFunc
|
||||
}{
|
||||
{
|
||||
"200 default",
|
||||
func(w http.ResponseWriter, r *http.Request) {},
|
||||
check(hasStatus(200), hasContents("")),
|
||||
},
|
||||
{
|
||||
"first code only",
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(201)
|
||||
w.WriteHeader(202)
|
||||
w.Write([]byte("hi"))
|
||||
},
|
||||
check(hasStatus(201), hasContents("hi")),
|
||||
},
|
||||
{
|
||||
"write sends 200",
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("hi first"))
|
||||
w.WriteHeader(201)
|
||||
w.WriteHeader(202)
|
||||
},
|
||||
check(hasStatus(200), hasContents("hi first"), hasFlush(false)),
|
||||
},
|
||||
{
|
||||
"flush",
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.(http.Flusher).Flush() // also sends a 200
|
||||
w.WriteHeader(201)
|
||||
},
|
||||
check(hasStatus(200), hasFlush(true)),
|
||||
},
|
||||
}
|
||||
r, _ := http.NewRequest("GET", "http://foo.com/", nil)
|
||||
for _, tt := range tests {
|
||||
h := http.HandlerFunc(tt.h)
|
||||
rec := NewRecorder()
|
||||
h.ServeHTTP(rec, r)
|
||||
for _, check := range tt.checks {
|
||||
if err := check(rec); err != nil {
|
||||
t.Errorf("%s: %v", tt.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httptest

import (
	"io/ioutil"
	"net/http"
	"testing"
	"time"
)

func TestServer(t *testing.T) {
	ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}))
	defer ts.Close()
	res, err := http.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	got, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "hello" {
		t.Errorf("got %q, want hello", string(got))
	}
}

func TestIssue7264(t *testing.T) {
	for i := 0; i < 1000; i++ {
		func() {
			inHandler := make(chan bool, 1)
			ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				inHandler <- true
			}))
			defer ts.Close()
			tr := &http.Transport{
				ResponseHeaderTimeout: time.Nanosecond,
			}
			defer tr.CloseIdleConnections()
			c := &http.Client{Transport: tr}
			res, err := c.Get(ts.URL)
			<-inHandler
			if err == nil {
				res.Body.Close()
			}
		}()
	}
}
|
|
@ -1,159 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code is duplicated in net/http and net/http/httputil.
|
||||
// Please make any changes in both files.
|
||||
|
||||
package httputil
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChunk(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
|
||||
w := newChunkedWriter(&b)
|
||||
const chunk1 = "hello, "
|
||||
const chunk2 = "world! 0123456789abcdef"
|
||||
w.Write([]byte(chunk1))
|
||||
w.Write([]byte(chunk2))
|
||||
w.Close()
|
||||
|
||||
if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
|
||||
t.Fatalf("chunk writer wrote %q; want %q", g, e)
|
||||
}
|
||||
|
||||
r := newChunkedReader(&b)
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Logf(`data: "%s"`, data)
|
||||
t.Fatalf("ReadAll from reader: %v", err)
|
||||
}
|
||||
if g, e := string(data), chunk1+chunk2; g != e {
|
||||
t.Errorf("chunk reader read %q; want %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChunkReadMultiple(t *testing.T) {
|
||||
// Bunch of small chunks, all read together.
|
||||
{
|
||||
var b bytes.Buffer
|
||||
w := newChunkedWriter(&b)
|
||||
w.Write([]byte("foo"))
|
||||
w.Write([]byte("bar"))
|
||||
w.Close()
|
||||
|
||||
r := newChunkedReader(&b)
|
||||
buf := make([]byte, 10)
|
||||
n, err := r.Read(buf)
|
||||
if n != 6 || err != io.EOF {
|
||||
t.Errorf("Read = %d, %v; want 6, EOF", n, err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
if string(buf) != "foobar" {
|
||||
t.Errorf("Read = %q; want %q", buf, "foobar")
|
||||
}
|
||||
}
|
||||
|
||||
// One big chunk followed by a little chunk, but the small bufio.Reader size
|
||||
// should prevent the second chunk header from being read.
|
||||
{
|
||||
var b bytes.Buffer
|
||||
w := newChunkedWriter(&b)
|
||||
// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
|
||||
// the same as the bufio ReaderSize below (the minimum), so even
|
||||
// though we're going to try to Read with a buffer large enough to also
|
||||
// receive "foo", the second chunk header won't be read yet.
|
||||
const fillBufChunk = "0123456789a"
|
||||
const shortChunk = "foo"
|
||||
w.Write([]byte(fillBufChunk))
|
||||
w.Write([]byte(shortChunk))
|
||||
w.Close()
|
||||
|
||||
r := newChunkedReader(bufio.NewReaderSize(&b, 16))
|
||||
buf := make([]byte, len(fillBufChunk)+len(shortChunk))
|
||||
n, err := r.Read(buf)
|
||||
if n != len(fillBufChunk) || err != nil {
|
||||
t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
|
||||
}
|
||||
buf = buf[:n]
|
||||
if string(buf) != fillBufChunk {
|
||||
t.Errorf("Read = %q; want %q", buf, fillBufChunk)
|
||||
}
|
||||
|
||||
n, err = r.Read(buf)
|
||||
if n != len(shortChunk) || err != io.EOF {
|
||||
t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
|
||||
}
|
||||
}
|
||||
|
||||
// And test that we see an EOF chunk, even though our buffer is already full:
|
||||
{
|
||||
r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
|
||||
buf := make([]byte, 3)
|
||||
n, err := r.Read(buf)
|
||||
if n != 3 || err != io.EOF {
|
||||
t.Errorf("Read = %d, %v; want 3, EOF", n, err)
|
||||
}
|
||||
if string(buf) != "foo" {
|
||||
t.Errorf("buf = %q; want foo", buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChunkReaderAllocs(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping in short mode")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
w := newChunkedWriter(&buf)
|
||||
a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
|
||||
w.Write(a)
|
||||
w.Write(b)
|
||||
w.Write(c)
|
||||
w.Close()
|
||||
|
||||
readBuf := make([]byte, len(a)+len(b)+len(c)+1)
|
||||
byter := bytes.NewReader(buf.Bytes())
|
||||
bufr := bufio.NewReader(byter)
|
||||
mallocs := testing.AllocsPerRun(100, func() {
|
||||
byter.Seek(0, 0)
|
||||
bufr.Reset(byter)
|
||||
r := newChunkedReader(bufr)
|
||||
n, err := io.ReadFull(r, readBuf)
|
||||
if n != len(readBuf)-1 {
|
||||
t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
|
||||
}
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
|
||||
}
|
||||
})
|
||||
if mallocs > 1.5 {
|
||||
t.Errorf("mallocs = %v; want 1", mallocs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHexUint(t *testing.T) {
|
||||
for i := uint64(0); i <= 1234; i++ {
|
||||
line := []byte(fmt.Sprintf("%x", i))
|
||||
got, err := parseHexUint(line)
|
||||
if err != nil {
|
||||
t.Fatalf("on %d: %v", i, err)
|
||||
}
|
||||
if got != i {
|
||||
t.Errorf("for input %q = %d; want %d", line, got, i)
|
||||
}
|
||||
}
|
||||
_, err := parseHexUint([]byte("bogus"))
|
||||
if err == nil {
|
||||
t.Error("expected error on bogus input")
|
||||
}
|
||||
}
|
|
@ -1,263 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httputil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type dumpTest struct {
|
||||
Req http.Request
|
||||
Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
|
||||
|
||||
WantDump string
|
||||
WantDumpOut string
|
||||
NoBody bool // if true, set DumpRequest{,Out} body to false
|
||||
}
|
||||
|
||||
var dumpTests = []dumpTest{
|
||||
|
||||
// HTTP/1.1 => chunked coding; body; empty trailer
|
||||
{
|
||||
Req: http.Request{
|
||||
Method: "GET",
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: "www.google.com",
|
||||
Path: "/search",
|
||||
},
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
},
|
||||
|
||||
Body: []byte("abcdef"),
|
||||
|
||||
WantDump: "GET /search HTTP/1.1\r\n" +
|
||||
"Host: www.google.com\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n\r\n" +
|
||||
chunk("abcdef") + chunk(""),
|
||||
},
|
||||
|
||||
// Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
|
||||
// and doesn't add a User-Agent.
|
||||
{
|
||||
Req: http.Request{
|
||||
Method: "GET",
|
||||
URL: mustParseURL("/foo"),
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Header: http.Header{
|
||||
"X-Foo": []string{"X-Bar"},
|
||||
},
|
||||
},
|
||||
|
||||
WantDump: "GET /foo HTTP/1.0\r\n" +
|
||||
"X-Foo: X-Bar\r\n\r\n",
|
||||
},
|
||||
|
||||
{
|
||||
Req: *mustNewRequest("GET", "http://example.com/foo", nil),
|
||||
|
||||
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
|
||||
"Host: example.com\r\n" +
|
||||
"User-Agent: Go 1.1 package http\r\n" +
|
||||
"Accept-Encoding: gzip\r\n\r\n",
|
||||
},
|
||||
|
||||
// Test that an https URL doesn't try to do an SSL negotiation
|
||||
// with a bytes.Buffer and hang with all goroutines not
|
||||
// runnable.
|
||||
{
|
||||
Req: *mustNewRequest("GET", "https://example.com/foo", nil),
|
||||
|
||||
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
|
||||
"Host: example.com\r\n" +
|
||||
"User-Agent: Go 1.1 package http\r\n" +
|
||||
"Accept-Encoding: gzip\r\n\r\n",
|
||||
},
|
||||
|
||||
// Request with Body, but Dump requested without it.
|
||||
{
|
||||
Req: http.Request{
|
||||
Method: "POST",
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: "post.tld",
|
||||
Path: "/",
|
||||
},
|
||||
ContentLength: 6,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
},
|
||||
|
||||
Body: []byte("abcdef"),
|
||||
|
||||
WantDumpOut: "POST / HTTP/1.1\r\n" +
|
||||
"Host: post.tld\r\n" +
|
||||
"User-Agent: Go 1.1 package http\r\n" +
|
||||
"Content-Length: 6\r\n" +
|
||||
"Accept-Encoding: gzip\r\n\r\n",
|
||||
|
||||
NoBody: true,
|
||||
},
|
||||
}
|
||||
|
||||
func TestDumpRequest(t *testing.T) {
|
||||
numg0 := runtime.NumGoroutine()
|
||||
for i, tt := range dumpTests {
|
||||
setBody := func() {
|
||||
if tt.Body == nil {
|
||||
return
|
||||
}
|
||||
switch b := tt.Body.(type) {
|
||||
case []byte:
|
||||
tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
case func() io.ReadCloser:
|
||||
tt.Req.Body = b()
|
||||
}
|
||||
}
|
||||
setBody()
|
||||
if tt.Req.Header == nil {
|
||||
tt.Req.Header = make(http.Header)
|
||||
}
|
||||
|
||||
if tt.WantDump != "" {
|
||||
setBody()
|
||||
dump, err := DumpRequest(&tt.Req, !tt.NoBody)
|
||||
if err != nil {
|
||||
t.Errorf("DumpRequest #%d: %s", i, err)
|
||||
continue
|
||||
}
|
||||
if string(dump) != tt.WantDump {
|
||||
t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tt.WantDumpOut != "" {
|
||||
setBody()
|
||||
dump, err := DumpRequestOut(&tt.Req, !tt.NoBody)
|
||||
if err != nil {
|
||||
t.Errorf("DumpRequestOut #%d: %s", i, err)
|
||||
continue
|
||||
}
|
||||
if string(dump) != tt.WantDumpOut {
|
||||
t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if dg := runtime.NumGoroutine() - numg0; dg > 4 {
|
||||
t.Errorf("Unexpectedly large number of new goroutines: %d new", dg)
|
||||
}
|
||||
}
|
||||
|
||||
func chunk(s string) string {
|
||||
return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
|
||||
}
|
||||
|
||||
func mustParseURL(s string) *url.URL {
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func mustNewRequest(method, url string, body io.Reader) *http.Request {
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err))
|
||||
}
|
||||
return req
|
||||
}
|
||||
|
||||
var dumpResTests = []struct {
|
||||
res *http.Response
|
||||
body bool
|
||||
want string
|
||||
}{
|
||||
{
|
||||
res: &http.Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
},
|
||||
body: false, // to verify we see 50, not empty or 3.
|
||||
want: `HTTP/1.1 200 OK
|
||||
Content-Length: 50
|
||||
Foo: Bar`,
|
||||
},
|
||||
|
||||
{
|
||||
res: &http.Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 3,
|
||||
Body: ioutil.NopCloser(strings.NewReader("foo")),
|
||||
},
|
||||
body: true,
|
||||
want: `HTTP/1.1 200 OK
|
||||
Content-Length: 3
|
||||
|
||||
foo`,
|
||||
},
|
||||
|
||||
{
|
||||
res: &http.Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: -1,
|
||||
Body: ioutil.NopCloser(strings.NewReader("foo")),
|
||||
TransferEncoding: []string{"chunked"},
|
||||
},
|
||||
body: true,
|
||||
want: `HTTP/1.1 200 OK
|
||||
Transfer-Encoding: chunked
|
||||
|
||||
3
|
||||
foo
|
||||
0`,
|
||||
},
|
||||
}
|
||||
|
||||
func TestDumpResponse(t *testing.T) {
|
||||
for i, tt := range dumpResTests {
|
||||
gotb, err := DumpResponse(tt.res, tt.body)
|
||||
if err != nil {
|
||||
t.Errorf("%d. DumpResponse = %v", i, err)
|
||||
continue
|
||||
}
|
||||
got := string(gotb)
|
||||
got = strings.TrimSpace(got)
|
||||
got = strings.Replace(got, "\r", "", -1)
|
||||
|
||||
if got != tt.want {
|
||||
t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
213 vendor/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go (generated, vendored)
@@ -1,213 +0,0 @@
(213 deleted lines: vendored copy of the Go net/http/httputil reverse proxy tests, covering TestReverseProxy, TestXForwardedFor, TestReverseProxyQuery, and TestReverseProxyFlushInterval)
@@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http

import (
	"testing"
)

func isChar(c rune) bool { return c <= 127 }

func isCtl(c rune) bool { return c <= 31 || c == 127 }

func isSeparator(c rune) bool {
	switch c {
	case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
		return true
	}
	return false
}

func TestIsToken(t *testing.T) {
	for i := 0; i <= 130; i++ {
		r := rune(i)
		expected := isChar(r) && !isCtl(r) && !isSeparator(r)
		if isToken(r) != expected {
			t.Errorf("isToken(0x%x) = %v", r, !expected)
		}
	}
}
@@ -1,118 +0,0 @@
(118 deleted lines: vendored Go net/http NPN/TLSNextProto upgrade tests, including TestNextProtoUpgrade and the handleTLSProtocol09 and http09Writer helpers)
@@ -1,81 +0,0 @@
(81 deleted lines: vendored Go net/http proxy-environment tests, including TestUseProxy for NO_PROXY matching, TestCacheKeys for connect-method cache keys, and the ResetProxyEnv helper)
@@ -1,79 +0,0 @@
(79 deleted lines: vendored Go net/http Range header tests, including the ParseRangeTests table and TestParseRange)
@@ -1,331 +0,0 @@
(331 deleted lines: vendored Go net/http request-reading tests, including the reqTests table and TestReadRequest covering GET, chunked POST, CONNECT, NOTIFY, and OPTIONS requests)
@@ -1,610 +0,0 @@
(610 deleted lines: vendored Go net/http request tests, including query and form parsing (TestQuery, TestPostQuery, TestPatchQuery), multipart form handling, TestSetBasicAuth, TestNewRequestContentLength, TestParseHTTPVersion, and the ReadRequest benchmarks)
@@ -1,565 +0,0 @@
(565 deleted lines: vendored Go net/http request-writing tests, including the reqWriteTests table, TestRequestWrite, TestRequestWriteClosesBody, and the chunk and mustParseURL helpers)
@ -1,645 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type respTest struct {
|
||||
Raw string
|
||||
Resp Response
|
||||
Body string
|
||||
}
|
||||
|
||||
func dummyReq(method string) *Request {
|
||||
return &Request{Method: method}
|
||||
}
|
||||
|
||||
func dummyReq11(method string) *Request {
|
||||
return &Request{Method: method, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1}
|
||||
}
|
||||
|
||||
var respTests = []respTest{
|
||||
// Unchunked response without Content-Length.
|
||||
{
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\n" +
|
||||
"Body here\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{
|
||||
"Connection": {"close"}, // TODO(rsc): Delete?
|
||||
},
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"Body here\n",
|
||||
},
|
||||
|
||||
// Unchunked HTTP/1.1 response without Content-Length or
|
||||
// Connection headers.
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"\r\n" +
|
||||
"Body here\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: Header{},
|
||||
Request: dummyReq("GET"),
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"Body here\n",
|
||||
},
|
||||
|
||||
// Unchunked HTTP/1.1 204 response without Content-Length.
|
||||
{
|
||||
"HTTP/1.1 204 No Content\r\n" +
|
||||
"\r\n" +
|
||||
"Body should not be read!\n",
|
||||
|
||||
Response{
|
||||
Status: "204 No Content",
|
||||
StatusCode: 204,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: Header{},
|
||||
Request: dummyReq("GET"),
|
||||
Close: false,
|
||||
ContentLength: 0,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// Unchunked response with Content-Length.
|
||||
{
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"Content-Length: 10\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\n" +
|
||||
"Body here\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{
|
||||
"Connection": {"close"},
|
||||
"Content-Length": {"10"},
|
||||
},
|
||||
Close: true,
|
||||
ContentLength: 10,
|
||||
},
|
||||
|
||||
"Body here\n",
|
||||
},
|
||||
|
||||
// Chunked response without Content-Length.
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n" +
|
||||
"\r\n" +
|
||||
"0a\r\n" +
|
||||
"Body here\n\r\n" +
|
||||
"09\r\n" +
|
||||
"continued\r\n" +
|
||||
"0\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Close: false,
|
||||
ContentLength: -1,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
},
|
||||
|
||||
"Body here\ncontinued",
|
||||
},
|
||||
|
||||
// Chunked response with Content-Length.
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n" +
|
||||
"Content-Length: 10\r\n" +
|
||||
"\r\n" +
|
||||
"0a\r\n" +
|
||||
"Body here\n\r\n" +
|
||||
"0\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Close: false,
|
||||
ContentLength: -1,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
},
|
||||
|
||||
"Body here\n",
|
||||
},
|
||||
|
||||
// Chunked response in response to a HEAD request
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("HEAD"),
|
||||
Header: Header{},
|
||||
TransferEncoding: []string{"chunked"},
|
||||
Close: false,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// Content-Length in response to a HEAD request
|
||||
{
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"Content-Length: 256\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("HEAD"),
|
||||
Header: Header{"Content-Length": {"256"}},
|
||||
TransferEncoding: nil,
|
||||
Close: true,
|
||||
ContentLength: 256,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// Content-Length in response to a HEAD request with HTTP/1.1
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Content-Length: 256\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("HEAD"),
|
||||
Header: Header{"Content-Length": {"256"}},
|
||||
TransferEncoding: nil,
|
||||
Close: false,
|
||||
ContentLength: 256,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// No Content-Length or Chunked in response to a HEAD request
|
||||
{
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("HEAD"),
|
||||
Header: Header{},
|
||||
TransferEncoding: nil,
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// explicit Content-Length of 0.
|
||||
{
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Content-Length: 0\r\n" +
|
||||
"\r\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{
|
||||
"Content-Length": {"0"},
|
||||
},
|
||||
Close: false,
|
||||
ContentLength: 0,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// Status line without a Reason-Phrase, but trailing space.
|
||||
// (permitted by RFC 2616)
|
||||
{
|
||||
"HTTP/1.0 303 \r\n\r\n",
|
||||
Response{
|
||||
Status: "303 ",
|
||||
StatusCode: 303,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// Status line without a Reason-Phrase, and no trailing space.
|
||||
// (not permitted by RFC 2616, but we'll accept it anyway)
|
||||
{
|
||||
"HTTP/1.0 303\r\n\r\n",
|
||||
Response{
|
||||
Status: "303 ",
|
||||
StatusCode: 303,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"",
|
||||
},
|
||||
|
||||
// golang.org/issue/4767: don't special-case multipart/byteranges responses
|
||||
{
|
||||
`HTTP/1.1 206 Partial Content
|
||||
Connection: close
|
||||
Content-Type: multipart/byteranges; boundary=18a75608c8f47cef
|
||||
|
||||
some body`,
|
||||
Response{
|
||||
Status: "206 Partial Content",
|
||||
StatusCode: 206,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{
|
||||
"Content-Type": []string{"multipart/byteranges; boundary=18a75608c8f47cef"},
|
||||
},
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"some body",
|
||||
},
|
||||
|
||||
// Unchunked response without Content-Length, Request is nil
|
||||
{
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\n" +
|
||||
"Body here\n",
|
||||
|
||||
Response{
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
Proto: "HTTP/1.0",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Header: Header{
|
||||
"Connection": {"close"}, // TODO(rsc): Delete?
|
||||
},
|
||||
Close: true,
|
||||
ContentLength: -1,
|
||||
},
|
||||
|
||||
"Body here\n",
|
||||
},
|
||||
}
|
||||
|
||||
func TestReadResponse(t *testing.T) {
|
||||
for i, tt := range respTests {
|
||||
resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
rbody := resp.Body
|
||||
resp.Body = nil
|
||||
diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp)
|
||||
var bout bytes.Buffer
|
||||
if rbody != nil {
|
||||
_, err = io.Copy(&bout, rbody)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
rbody.Close()
|
||||
}
|
||||
body := bout.String()
|
||||
if body != tt.Body {
|
||||
t.Errorf("#%d: Body = %q want %q", i, body, tt.Body)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteResponse(t *testing.T) {
|
||||
for i, tt := range respTests {
|
||||
resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
err = resp.Write(ioutil.Discard)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var readResponseCloseInMiddleTests = []struct {
|
||||
chunked, compressed bool
|
||||
}{
|
||||
{false, false},
|
||||
{true, false},
|
||||
{true, true},
|
||||
}
|
||||
|
||||
// TestReadResponseCloseInMiddle tests that closing a body after
|
||||
// reading only part of its contents advances the read to the end of
|
||||
// the request, right up until the next request.
|
||||
func TestReadResponseCloseInMiddle(t *testing.T) {
|
||||
for _, test := range readResponseCloseInMiddleTests {
|
||||
fatalf := func(format string, args ...interface{}) {
|
||||
args = append([]interface{}{test.chunked, test.compressed}, args...)
|
||||
t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
|
||||
}
|
||||
checkErr := func(err error, msg string) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
fatalf(msg+": %v", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("HTTP/1.1 200 OK\r\n")
|
||||
if test.chunked {
|
||||
buf.WriteString("Transfer-Encoding: chunked\r\n")
|
||||
} else {
|
||||
buf.WriteString("Content-Length: 1000000\r\n")
|
||||
}
|
||||
var wr io.Writer = &buf
|
||||
if test.chunked {
|
||||
wr = newChunkedWriter(wr)
|
||||
}
|
||||
if test.compressed {
|
||||
buf.WriteString("Content-Encoding: gzip\r\n")
|
||||
wr = gzip.NewWriter(wr)
|
||||
}
|
||||
buf.WriteString("\r\n")
|
||||
|
||||
chunk := bytes.Repeat([]byte{'x'}, 1000)
|
||||
for i := 0; i < 1000; i++ {
|
||||
if test.compressed {
|
||||
// Otherwise this compresses too well.
|
||||
_, err := io.ReadFull(rand.Reader, chunk)
|
||||
checkErr(err, "rand.Reader ReadFull")
|
||||
}
|
||||
wr.Write(chunk)
|
||||
}
|
||||
if test.compressed {
|
||||
err := wr.(*gzip.Writer).Close()
|
||||
checkErr(err, "compressor close")
|
||||
}
|
||||
if test.chunked {
|
||||
buf.WriteString("0\r\n\r\n")
|
||||
}
|
||||
buf.WriteString("Next Request Here")
|
||||
|
||||
bufr := bufio.NewReader(&buf)
|
||||
resp, err := ReadResponse(bufr, dummyReq("GET"))
|
||||
checkErr(err, "ReadResponse")
|
||||
expectedLength := int64(-1)
|
||||
if !test.chunked {
|
||||
expectedLength = 1000000
|
||||
}
|
||||
if resp.ContentLength != expectedLength {
|
||||
fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength)
|
||||
}
|
||||
if resp.Body == nil {
|
||||
fatalf("nil body")
|
||||
}
|
||||
if test.compressed {
|
||||
gzReader, err := gzip.NewReader(resp.Body)
|
||||
checkErr(err, "gzip.NewReader")
|
||||
resp.Body = &readerAndCloser{gzReader, resp.Body}
|
||||
}
|
||||
|
||||
rbuf := make([]byte, 2500)
|
||||
n, err := io.ReadFull(resp.Body, rbuf)
|
||||
checkErr(err, "2500 byte ReadFull")
|
||||
if n != 2500 {
|
||||
fatalf("ReadFull only read %d bytes", n)
|
||||
}
|
||||
if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) {
|
||||
fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf))
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
rest, err := ioutil.ReadAll(bufr)
|
||||
checkErr(err, "ReadAll on remainder")
|
||||
if e, g := "Next Request Here", string(rest); e != g {
|
||||
g = regexp.MustCompile(`(xx+)`).ReplaceAllStringFunc(g, func(match string) string {
|
||||
return fmt.Sprintf("x(repeated x%d)", len(match))
|
||||
})
|
||||
fatalf("remainder = %q, expected %q", g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func diff(t *testing.T, prefix string, have, want interface{}) {
|
||||
hv := reflect.ValueOf(have).Elem()
|
||||
wv := reflect.ValueOf(want).Elem()
|
||||
if hv.Type() != wv.Type() {
|
||||
t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type())
|
||||
}
|
||||
for i := 0; i < hv.NumField(); i++ {
|
||||
hf := hv.Field(i).Interface()
|
||||
wf := wv.Field(i).Interface()
|
||||
if !reflect.DeepEqual(hf, wf) {
|
||||
t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type responseLocationTest struct {
|
||||
location string // Response's Location header or ""
|
||||
requrl string // Response.Request.URL or ""
|
||||
want string
|
||||
wantErr error
|
||||
}
|
||||
|
||||
var responseLocationTests = []responseLocationTest{
|
||||
{"/foo", "http://bar.com/baz", "http://bar.com/foo", nil},
|
||||
{"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil},
|
||||
{"", "http://bar.com/baz", "", ErrNoLocation},
|
||||
}
|
||||
|
||||
func TestLocationResponse(t *testing.T) {
|
||||
for i, tt := range responseLocationTests {
|
||||
res := new(Response)
|
||||
res.Header = make(Header)
|
||||
res.Header.Set("Location", tt.location)
|
||||
if tt.requrl != "" {
|
||||
res.Request = &Request{}
|
||||
var err error
|
||||
res.Request.URL, err = url.Parse(tt.requrl)
|
||||
if err != nil {
|
||||
t.Fatalf("bad test URL %q: %v", tt.requrl, err)
|
||||
}
|
||||
}
|
||||
|
||||
got, err := res.Location()
|
||||
if tt.wantErr != nil {
|
||||
if err == nil {
|
||||
t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
|
||||
continue
|
||||
}
|
||||
if g, e := err.Error(), tt.wantErr.Error(); g != e {
|
||||
t.Errorf("%d. err=%q; want %q", i, g, e)
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("%d. err=%q", i, err)
|
||||
continue
|
||||
}
|
||||
if g, e := got.String(), tt.want; g != e {
|
||||
t.Errorf("%d. Location=%q; want %q", i, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResponseStatusStutter(t *testing.T) {
|
||||
r := &Response{
|
||||
Status: "123 some status",
|
||||
StatusCode: 123,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 3,
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
r.Write(&buf)
|
||||
if strings.Contains(buf.String(), "123 123") {
|
||||
t.Errorf("stutter in status: %s", buf.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResponseContentLengthShortBody(t *testing.T) {
|
||||
const shortBody = "Short body, not 123 bytes."
|
||||
br := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n" +
|
||||
"Content-Length: 123\r\n" +
|
||||
"\r\n" +
|
||||
shortBody))
|
||||
res, err := ReadResponse(br, &Request{Method: "GET"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.ContentLength != 123 {
|
||||
t.Fatalf("Content-Length = %d; want 123", res.ContentLength)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
n, err := io.Copy(&buf, res.Body)
|
||||
if n != int64(len(shortBody)) {
|
||||
t.Errorf("Copied %d bytes; want %d, len(%q)", n, len(shortBody), shortBody)
|
||||
}
|
||||
if buf.String() != shortBody {
|
||||
t.Errorf("Read body %q; want %q", buf.String(), shortBody)
|
||||
}
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Errorf("io.Copy error = %#v; want io.ErrUnexpectedEOF", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadResponseUnexpectedEOF(t *testing.T) {
|
||||
br := bufio.NewReader(strings.NewReader("HTTP/1.1 301 Moved Permanently\r\n" +
|
||||
"Location: http://example.com"))
|
||||
_, err := ReadResponse(br, nil)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Errorf("ReadResponse = %v; want io.ErrUnexpectedEOF", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsSniff(t *testing.T) {
|
||||
// needsSniff returns true with an empty response.
|
||||
r := &response{}
|
||||
if got, want := r.needsSniff(), true; got != want {
|
||||
t.Errorf("needsSniff = %t; want %t", got, want)
|
||||
}
|
||||
// needsSniff returns false when Content-Type = nil.
|
||||
r.handlerHeader = Header{"Content-Type": nil}
|
||||
if got, want := r.needsSniff(), false; got != want {
|
||||
t.Errorf("needsSniff empty Content-Type = %t; want %t", got, want)
|
||||
}
|
||||
}
|
|
@ -1,226 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type respWriteTest struct {
|
||||
Resp Response
|
||||
Raw string
|
||||
}
|
||||
|
||||
func TestResponseWrite(t *testing.T) {
|
||||
respWriteTests := []respWriteTest{
|
||||
// HTTP/1.0, identity coding; no trailer
|
||||
{
|
||||
Response{
|
||||
StatusCode: 503,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: 6,
|
||||
},
|
||||
|
||||
"HTTP/1.0 503 Service Unavailable\r\n" +
|
||||
"Content-Length: 6\r\n\r\n" +
|
||||
"abcdef",
|
||||
},
|
||||
// Unchunked response without Content-Length.
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 0,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: -1,
|
||||
},
|
||||
"HTTP/1.0 200 OK\r\n" +
|
||||
"\r\n" +
|
||||
"abcdef",
|
||||
},
|
||||
// HTTP/1.1 response with unknown length and Connection: close
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: -1,
|
||||
Close: true,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\n" +
|
||||
"abcdef",
|
||||
},
|
||||
// HTTP/1.1 response with unknown length and not setting connection: close
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq11("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: -1,
|
||||
Close: false,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\n" +
|
||||
"abcdef",
|
||||
},
|
||||
// HTTP/1.1 response with unknown length and not setting connection: close, but
|
||||
// setting chunked.
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq11("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: -1,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
Close: false,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n\r\n" +
|
||||
"6\r\nabcdef\r\n0\r\n\r\n",
|
||||
},
|
||||
// HTTP/1.1 response 0 content-length, and nil body
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq11("GET"),
|
||||
Header: Header{},
|
||||
Body: nil,
|
||||
ContentLength: 0,
|
||||
Close: false,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Content-Length: 0\r\n" +
|
||||
"\r\n",
|
||||
},
|
||||
// HTTP/1.1 response 0 content-length, and non-nil empty body
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq11("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("")),
|
||||
ContentLength: 0,
|
||||
Close: false,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Content-Length: 0\r\n" +
|
||||
"\r\n",
|
||||
},
|
||||
// HTTP/1.1 response 0 content-length, and non-nil non-empty body
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq11("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("foo")),
|
||||
ContentLength: 0,
|
||||
Close: false,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"\r\nfoo",
|
||||
},
|
||||
// HTTP/1.1, chunked coding; empty trailer; close
|
||||
{
|
||||
Response{
|
||||
StatusCode: 200,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{},
|
||||
Body: ioutil.NopCloser(strings.NewReader("abcdef")),
|
||||
ContentLength: 6,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
Close: true,
|
||||
},
|
||||
|
||||
"HTTP/1.1 200 OK\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"Transfer-Encoding: chunked\r\n\r\n" +
|
||||
"6\r\nabcdef\r\n0\r\n\r\n",
|
||||
},
|
||||
|
||||
// Header value with a newline character (Issue 914).
|
||||
// Also tests removal of leading and trailing whitespace.
|
||||
{
|
||||
Response{
|
||||
StatusCode: 204,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: dummyReq("GET"),
|
||||
Header: Header{
|
||||
"Foo": []string{" Bar\nBaz "},
|
||||
},
|
||||
Body: nil,
|
||||
ContentLength: 0,
|
||||
TransferEncoding: []string{"chunked"},
|
||||
Close: true,
|
||||
},
|
||||
|
||||
"HTTP/1.1 204 No Content\r\n" +
|
||||
"Connection: close\r\n" +
|
||||
"Foo: Bar Baz\r\n" +
|
||||
"\r\n",
|
||||
},
|
||||
|
||||
// Want a single Content-Length header. Fixing issue 8180 where
|
||||
// there were two.
|
||||
{
|
||||
Response{
|
||||
StatusCode: StatusOK,
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Request: &Request{Method: "POST"},
|
||||
Header: Header{},
|
||||
ContentLength: 0,
|
||||
TransferEncoding: nil,
|
||||
Body: nil,
|
||||
},
|
||||
"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n",
|
||||
},
|
||||
}
|
||||
|
||||
for i := range respWriteTests {
|
||||
tt := &respWriteTests[i]
|
||||
var braw bytes.Buffer
|
||||
err := tt.Resp.Write(&braw)
|
||||
if err != nil {
|
||||
t.Errorf("error writing #%d: %s", i, err)
|
||||
continue
|
||||
}
|
||||
sraw := braw.String()
|
||||
if sraw != tt.Raw {
|
||||
t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,171 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http_test

import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
. "net/http"
"net/http/httptest"
"reflect"
"strconv"
"strings"
"testing"
)

var sniffTests = []struct {
desc string
data []byte
contentType string
}{
// Some nonsense.
{"Empty", []byte{}, "text/plain; charset=utf-8"},
{"Binary", []byte{1, 2, 3}, "application/octet-stream"},

{"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
{"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
{"HTML document #3 (leading whitespace)", []byte(` <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
{"HTML document #4 (leading CRLF)", []byte("\r\n<html>..."), "text/html; charset=utf-8"},

{"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},

{"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},

// Image types.
{"GIF 87a", []byte(`GIF87a`), "image/gif"},
{"GIF 89a", []byte(`GIF89a...`), "image/gif"},

// TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
//{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
//{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00"), "audio/mp4"},
}

func TestDetectContentType(t *testing.T) {
for _, tt := range sniffTests {
ct := DetectContentType(tt.data)
if ct != tt.contentType {
t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
}
}
}

func TestServerContentType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
i, _ := strconv.Atoi(r.FormValue("i"))
tt := sniffTests[i]
n, err := w.Write(tt.data)
if n != len(tt.data) || err != nil {
log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
}
}))
defer ts.Close()

for i, tt := range sniffTests {
resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
if err != nil {
t.Errorf("%v: %v", tt.desc, err)
continue
}
if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("%v: reading body: %v", tt.desc, err)
} else if !bytes.Equal(data, tt.data) {
t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
}
resp.Body.Close()
}
}

// Issue 5953: shouldn't sniff if the handler set a Content-Type header,
// even if it's the empty string.
func TestServerIssue5953(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header()["Content-Type"] = []string{""}
fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
}))
defer ts.Close()

resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}

got := resp.Header["Content-Type"]
want := []string{""}
if !reflect.DeepEqual(got, want) {
t.Errorf("Content-Type = %q; want %q", got, want)
}
resp.Body.Close()
}

func TestContentTypeWithCopy(t *testing.T) {
defer afterTest(t)

const (
input = "\n<html>\n\t<head>\n"
expected = "text/html; charset=utf-8"
)

ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
// Use io.Copy from a bytes.Buffer to trigger ReadFrom.
buf := bytes.NewBuffer([]byte(input))
n, err := io.Copy(w, buf)
if int(n) != len(input) || err != nil {
t.Errorf("io.Copy(w, %q) = %v, %v want %d, nil", input, n, err, len(input))
}
}))
defer ts.Close()

resp, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
if ct := resp.Header.Get("Content-Type"); ct != expected {
t.Errorf("Content-Type = %q, want %q", ct, expected)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("reading body: %v", err)
} else if !bytes.Equal(data, []byte(input)) {
t.Errorf("data is %q, want %q", data, input)
}
resp.Body.Close()
}

func TestSniffWriteSize(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
size, _ := strconv.Atoi(r.FormValue("size"))
written, err := io.WriteString(w, strings.Repeat("a", size))
if err != nil {
t.Errorf("write of %d bytes: %v", size, err)
return
}
if written != size {
t.Errorf("write of %d bytes wrote %d bytes", size, written)
}
}))
defer ts.Close()
for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} {
res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size))
if err != nil {
t.Fatalf("size %d: %v", size, err)
}
if _, err := io.Copy(ioutil.Discard, res.Body); err != nil {
t.Fatalf("size %d: io.Copy of body = %v", size, err)
}
if err := res.Body.Close(); err != nil {
t.Fatalf("size %d: body Close = %v", size, err)
}
}
}
|
|
@ -1 +0,0 @@
|
|||
0123456789
|
|
@ -1 +0,0 @@
|
|||
index.html says hello
|
|
@ -1 +0,0 @@
|
|||
body {}
|
|
@ -1,64 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http

import (
"bufio"
"io"
"strings"
"testing"
)

func TestBodyReadBadTrailer(t *testing.T) {
b := &body{
src: strings.NewReader("foobar"),
hdr: true, // force reading the trailer
r: bufio.NewReader(strings.NewReader("")),
}
buf := make([]byte, 7)
n, err := b.Read(buf[:3])
got := string(buf[:n])
if got != "foo" || err != nil {
t.Fatalf(`first Read = %d (%q), %v; want 3 ("foo")`, n, got, err)
}

n, err = b.Read(buf[:])
got = string(buf[:n])
if got != "bar" || err != nil {
t.Fatalf(`second Read = %d (%q), %v; want 3 ("bar")`, n, got, err)
}

n, err = b.Read(buf[:])
got = string(buf[:n])
if err == nil {
t.Errorf("final Read was successful (%q), expected error from trailer read", got)
}
}

func TestFinalChunkedBodyReadEOF(t *testing.T) {
res, err := ReadResponse(bufio.NewReader(strings.NewReader(
"HTTP/1.1 200 OK\r\n"+
"Transfer-Encoding: chunked\r\n"+
"\r\n"+
"0a\r\n"+
"Body here\n\r\n"+
"09\r\n"+
"continued\r\n"+
"0\r\n"+
"\r\n")), nil)
if err != nil {
t.Fatal(err)
}
want := "Body here\ncontinued"
buf := make([]byte, len(want))
n, err := res.Body.Read(buf)
if n != len(want) || err != io.EOF {
t.Logf("body = %#v", res.Body)
t.Errorf("Read = %v, %v; want %d, EOF", n, err, len(want))
}
if string(buf) != want {
t.Errorf("buf = %q; want %q", buf, want)
}
}
|
File diff suppressed because it is too large
|
@ -1,97 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http_test

import (
"net/http"
"runtime"
"sort"
"strings"
"testing"
"time"
)

func interestingGoroutines() (gs []string) {
buf := make([]byte, 2<<20)
buf = buf[:runtime.Stack(buf, true)]
for _, g := range strings.Split(string(buf), "\n\n") {
sl := strings.SplitN(g, "\n", 2)
if len(sl) != 2 {
continue
}
stack := strings.TrimSpace(sl[1])
if stack == "" ||
strings.Contains(stack, "created by net.startServer") ||
strings.Contains(stack, "created by testing.RunTests") ||
strings.Contains(stack, "closeWriteAndWait") ||
strings.Contains(stack, "testing.Main(") ||
// These only show up with GOTRACEBACK=2; Issue 5005 (comment 28)
strings.Contains(stack, "runtime.goexit") ||
strings.Contains(stack, "created by runtime.gc") ||
strings.Contains(stack, "runtime.MHeap_Scavenger") {
continue
}
gs = append(gs, stack)
}
sort.Strings(gs)
return
}

// Verify the other tests didn't leave any goroutines running.
// This is in a file named z_last_test.go so it sorts at the end.
func TestGoroutinesRunning(t *testing.T) {
if testing.Short() {
t.Skip("not counting goroutines for leakage in -short mode")
}
gs := interestingGoroutines()

n := 0
stackCount := make(map[string]int)
for _, g := range gs {
stackCount[g]++
n++
}

t.Logf("num goroutines = %d", n)
if n > 0 {
t.Error("Too many goroutines.")
for stack, count := range stackCount {
t.Logf("%d instances of:\n%s", count, stack)
}
}
}

func afterTest(t *testing.T) {
http.DefaultTransport.(*http.Transport).CloseIdleConnections()
if testing.Short() {
return
}
var bad string
badSubstring := map[string]string{
").readLoop(": "a Transport",
").writeLoop(": "a Transport",
"created by net/http/httptest.(*Server).Start": "an httptest.Server",
"timeoutHandler": "a TimeoutHandler",
"net.(*netFD).connect(": "a timing out dial",
").noteClientGone(": "a closenotifier sender",
}
var stacks string
for i := 0; i < 4; i++ {
bad = ""
stacks = strings.Join(interestingGoroutines(), "\n\n")
for substr, what := range badSubstring {
if strings.Contains(stacks, substr) {
bad = what
}
}
if bad == "" {
return
}
// Bad stuff found, but goroutines might just still be
// shutting down, so give it some time.
time.Sleep(250 * time.Millisecond)
}
t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks)
}
|
|
@ -1,106 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tls
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRoundUp(t *testing.T) {
|
||||
if roundUp(0, 16) != 0 ||
|
||||
roundUp(1, 16) != 16 ||
|
||||
roundUp(15, 16) != 16 ||
|
||||
roundUp(16, 16) != 16 ||
|
||||
roundUp(17, 16) != 32 {
|
||||
t.Error("roundUp broken")
|
||||
}
|
||||
}
|
||||
|
||||
var paddingTests = []struct {
|
||||
in []byte
|
||||
good bool
|
||||
expectedLen int
|
||||
}{
|
||||
{[]byte{1, 2, 3, 4, 0}, true, 4},
|
||||
{[]byte{1, 2, 3, 4, 0, 1}, false, 0},
|
||||
{[]byte{1, 2, 3, 4, 99, 99}, false, 0},
|
||||
{[]byte{1, 2, 3, 4, 1, 1}, true, 4},
|
||||
{[]byte{1, 2, 3, 2, 2, 2}, true, 3},
|
||||
{[]byte{1, 2, 3, 3, 3, 3}, true, 2},
|
||||
{[]byte{1, 2, 3, 4, 3, 3}, false, 0},
|
||||
{[]byte{1, 4, 4, 4, 4, 4}, true, 1},
|
||||
{[]byte{5, 5, 5, 5, 5, 5}, true, 0},
|
||||
{[]byte{6, 6, 6, 6, 6, 6}, false, 0},
|
||||
}
|
||||
|
||||
func TestRemovePadding(t *testing.T) {
|
||||
for i, test := range paddingTests {
|
||||
payload, good := removePadding(test.in)
|
||||
expectedGood := byte(255)
|
||||
if !test.good {
|
||||
expectedGood = 0
|
||||
}
|
||||
if good != expectedGood {
|
||||
t.Errorf("#%d: wrong validity, want:%d got:%d", i, expectedGood, good)
|
||||
}
|
||||
if good == 255 && len(payload) != test.expectedLen {
|
||||
t.Errorf("#%d: got %d, want %d", i, len(payload), test.expectedLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var certExampleCom = `308201403081eda003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313138353835325a170d3132303933303138353835325a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31a301830160603551d11040f300d820b6578616d706c652e636f6d300b06092a864886f70d0101050341001a0b419d2c74474c6450654e5f10b32bf426ffdf55cad1c52602e7a9151513a3424c70f5960dcd682db0c33769cc1daa3fcdd3db10809d2392ed4a1bf50ced18`
|
||||
|
||||
var certWildcardExampleCom = `308201423081efa003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303034365a170d3132303933303139303034365a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31c301a30180603551d110411300f820d2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001676f0c9e7c33c1b656ed5a6476c4e2ee9ec8e62df7407accb1875272b2edd0a22096cb2c22598d11604104d604f810eb4b5987ca6bb319c7e6ce48725c54059`
|
||||
|
||||
var certFooExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303131345a170d3132303933303139303131345a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f666f6f2e6578616d706c652e636f6d300b06092a864886f70d010105034100646a2a51f2aa2477add854b462cf5207ba16d3213ffb5d3d0eed473fbf09935019192d1d5b8ca6a2407b424cf04d97c4cd9197c83ecf81f0eab9464a1109d09f`
|
||||
|
||||
var certDoubleWildcardExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303134315a170d3132303933303139303134315a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f2a2e2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001c3de267975f56ef57771c6218ef95ecc65102e57bd1defe6f7efea90d9b26cf40de5bd7ad75e46201c7f2a92aaa3e907451e9409f65e28ddb6db80d726290f6`
|
||||
|
||||
func TestCertificateSelection(t *testing.T) {
|
||||
config := Config{
|
||||
Certificates: []Certificate{
|
||||
{
|
||||
Certificate: [][]byte{fromHex(certExampleCom)},
|
||||
},
|
||||
{
|
||||
Certificate: [][]byte{fromHex(certWildcardExampleCom)},
|
||||
},
|
||||
{
|
||||
Certificate: [][]byte{fromHex(certFooExampleCom)},
|
||||
},
|
||||
{
|
||||
Certificate: [][]byte{fromHex(certDoubleWildcardExampleCom)},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
config.BuildNameToCertificate()
|
||||
|
||||
pointerToIndex := func(c *Certificate) int {
|
||||
for i := range config.Certificates {
|
||||
if c == &config.Certificates[i] {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
if n := pointerToIndex(config.getCertificateForName("example.com")); n != 0 {
|
||||
t.Errorf("example.com returned certificate %d, not 0", n)
|
||||
}
|
||||
if n := pointerToIndex(config.getCertificateForName("bar.example.com")); n != 1 {
|
||||
t.Errorf("bar.example.com returned certificate %d, not 1", n)
|
||||
}
|
||||
if n := pointerToIndex(config.getCertificateForName("foo.example.com")); n != 2 {
|
||||
t.Errorf("foo.example.com returned certificate %d, not 2", n)
|
||||
}
|
||||
if n := pointerToIndex(config.getCertificateForName("foo.bar.example.com")); n != 3 {
|
||||
t.Errorf("foo.bar.example.com returned certificate %d, not 3", n)
|
||||
}
|
||||
if n := pointerToIndex(config.getCertificateForName("foo.bar.baz.example.com")); n != 0 {
|
||||
t.Errorf("foo.bar.baz.example.com returned certificate %d, not 0", n)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,246 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tls
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
var tests = []interface{}{
|
||||
&clientHelloMsg{},
|
||||
&serverHelloMsg{},
|
||||
&finishedMsg{},
|
||||
|
||||
&certificateMsg{},
|
||||
&certificateRequestMsg{},
|
||||
&certificateVerifyMsg{},
|
||||
&certificateStatusMsg{},
|
||||
&clientKeyExchangeMsg{},
|
||||
&nextProtoMsg{},
|
||||
&newSessionTicketMsg{},
|
||||
&sessionState{},
|
||||
}
|
||||
|
||||
type testMessage interface {
|
||||
marshal() []byte
|
||||
unmarshal([]byte) bool
|
||||
equal(interface{}) bool
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshal(t *testing.T) {
|
||||
rand := rand.New(rand.NewSource(0))
|
||||
|
||||
for i, iface := range tests {
|
||||
ty := reflect.ValueOf(iface).Type()
|
||||
|
||||
n := 100
|
||||
if testing.Short() {
|
||||
n = 5
|
||||
}
|
||||
for j := 0; j < n; j++ {
|
||||
v, ok := quick.Value(ty, rand)
|
||||
if !ok {
|
||||
t.Errorf("#%d: failed to create value", i)
|
||||
break
|
||||
}
|
||||
|
||||
m1 := v.Interface().(testMessage)
|
||||
marshaled := m1.marshal()
|
||||
m2 := iface.(testMessage)
|
||||
if !m2.unmarshal(marshaled) {
|
||||
t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled)
|
||||
break
|
||||
}
|
||||
m2.marshal() // to fill any marshal cache in the message
|
||||
|
||||
if !m1.equal(m2) {
|
||||
t.Errorf("#%d got:%#v want:%#v %x", i, m2, m1, marshaled)
|
||||
break
|
||||
}
|
||||
|
||||
if i >= 3 {
|
||||
// The first three message types (ClientHello,
|
||||
// ServerHello and Finished) are allowed to
|
||||
// have parsable prefixes because the extension
|
||||
// data is optional and the length of the
|
||||
// Finished varies across versions.
|
||||
for j := 0; j < len(marshaled); j++ {
|
||||
if m2.unmarshal(marshaled[0:j]) {
|
||||
t.Errorf("#%d unmarshaled a prefix of length %d of %#v", i, j, m1)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzz(t *testing.T) {
|
||||
rand := rand.New(rand.NewSource(0))
|
||||
for _, iface := range tests {
|
||||
m := iface.(testMessage)
|
||||
|
||||
for j := 0; j < 1000; j++ {
|
||||
len := rand.Intn(100)
|
||||
bytes := randomBytes(len, rand)
|
||||
// This just looks for crashes due to bounds errors etc.
|
||||
m.unmarshal(bytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func randomBytes(n int, rand *rand.Rand) []byte {
|
||||
r := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r[i] = byte(rand.Int31())
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func randomString(n int, rand *rand.Rand) string {
|
||||
b := randomBytes(n, rand)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &clientHelloMsg{}
|
||||
m.vers = uint16(rand.Intn(65536))
|
||||
m.random = randomBytes(32, rand)
|
||||
m.sessionId = randomBytes(rand.Intn(32), rand)
|
||||
m.cipherSuites = make([]uint16, rand.Intn(63)+1)
|
||||
for i := 0; i < len(m.cipherSuites); i++ {
|
||||
m.cipherSuites[i] = uint16(rand.Int31())
|
||||
}
|
||||
m.compressionMethods = randomBytes(rand.Intn(63)+1, rand)
|
||||
if rand.Intn(10) > 5 {
|
||||
m.nextProtoNeg = true
|
||||
}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.serverName = randomString(rand.Intn(255), rand)
|
||||
}
|
||||
m.ocspStapling = rand.Intn(10) > 5
|
||||
m.supportedPoints = randomBytes(rand.Intn(5)+1, rand)
|
||||
m.supportedCurves = make([]uint16, rand.Intn(5)+1)
|
||||
for i := range m.supportedCurves {
|
||||
m.supportedCurves[i] = uint16(rand.Intn(30000))
|
||||
}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.ticketSupported = true
|
||||
if rand.Intn(10) > 5 {
|
||||
m.sessionTicket = randomBytes(rand.Intn(300), rand)
|
||||
}
|
||||
}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.signatureAndHashes = supportedSKXSignatureAlgorithms
|
||||
}
|
||||
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &serverHelloMsg{}
|
||||
m.vers = uint16(rand.Intn(65536))
|
||||
m.random = randomBytes(32, rand)
|
||||
m.sessionId = randomBytes(rand.Intn(32), rand)
|
||||
m.cipherSuite = uint16(rand.Int31())
|
||||
m.compressionMethod = uint8(rand.Intn(256))
|
||||
|
||||
if rand.Intn(10) > 5 {
|
||||
m.nextProtoNeg = true
|
||||
|
||||
n := rand.Intn(10)
|
||||
m.nextProtos = make([]string, n)
|
||||
for i := 0; i < n; i++ {
|
||||
m.nextProtos[i] = randomString(20, rand)
|
||||
}
|
||||
}
|
||||
|
||||
if rand.Intn(10) > 5 {
|
||||
m.ocspStapling = true
|
||||
}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.ticketSupported = true
|
||||
}
|
||||
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*certificateMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &certificateMsg{}
|
||||
numCerts := rand.Intn(20)
|
||||
m.certificates = make([][]byte, numCerts)
|
||||
for i := 0; i < numCerts; i++ {
|
||||
m.certificates[i] = randomBytes(rand.Intn(10)+1, rand)
|
||||
}
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*certificateRequestMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &certificateRequestMsg{}
|
||||
m.certificateTypes = randomBytes(rand.Intn(5)+1, rand)
|
||||
numCAs := rand.Intn(100)
|
||||
m.certificateAuthorities = make([][]byte, numCAs)
|
||||
for i := 0; i < numCAs; i++ {
|
||||
m.certificateAuthorities[i] = randomBytes(rand.Intn(15)+1, rand)
|
||||
}
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*certificateVerifyMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &certificateVerifyMsg{}
|
||||
m.signature = randomBytes(rand.Intn(15)+1, rand)
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*certificateStatusMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &certificateStatusMsg{}
|
||||
if rand.Intn(10) > 5 {
|
||||
m.statusType = statusTypeOCSP
|
||||
m.response = randomBytes(rand.Intn(10)+1, rand)
|
||||
} else {
|
||||
m.statusType = 42
|
||||
}
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*clientKeyExchangeMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &clientKeyExchangeMsg{}
|
||||
m.ciphertext = randomBytes(rand.Intn(1000)+1, rand)
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*finishedMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &finishedMsg{}
|
||||
m.verifyData = randomBytes(12, rand)
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*nextProtoMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &nextProtoMsg{}
|
||||
m.proto = randomString(rand.Intn(255), rand)
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*newSessionTicketMsg) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
m := &newSessionTicketMsg{}
|
||||
m.ticket = randomBytes(rand.Intn(4), rand)
|
||||
return reflect.ValueOf(m)
|
||||
}
|
||||
|
||||
func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
s := &sessionState{}
|
||||
s.vers = uint16(rand.Intn(10000))
|
||||
s.cipherSuite = uint16(rand.Intn(10000))
|
||||
s.masterSecret = randomBytes(rand.Intn(100), rand)
|
||||
numCerts := rand.Intn(20)
|
||||
s.certificates = make([][]byte, numCerts)
|
||||
for i := 0; i < numCerts; i++ {
|
||||
s.certificates[i] = randomBytes(rand.Intn(10)+1, rand)
|
||||
}
|
||||
return reflect.ValueOf(s)
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,126 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tls
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testSplitPreMasterSecretTest struct {
|
||||
in, out1, out2 string
|
||||
}
|
||||
|
||||
var testSplitPreMasterSecretTests = []testSplitPreMasterSecretTest{
|
||||
{"", "", ""},
|
||||
{"00", "00", "00"},
|
||||
{"0011", "00", "11"},
|
||||
{"001122", "0011", "1122"},
|
||||
{"00112233", "0011", "2233"},
|
||||
}
|
||||
|
||||
func TestSplitPreMasterSecret(t *testing.T) {
|
||||
for i, test := range testSplitPreMasterSecretTests {
|
||||
in, _ := hex.DecodeString(test.in)
|
||||
out1, out2 := splitPreMasterSecret(in)
|
||||
s1 := hex.EncodeToString(out1)
|
||||
s2 := hex.EncodeToString(out2)
|
||||
if s1 != test.out1 || s2 != test.out2 {
|
||||
t.Errorf("#%d: got: (%s, %s) want: (%s, %s)", i, s1, s2, test.out1, test.out2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testKeysFromTest struct {
|
||||
version uint16
|
||||
preMasterSecret string
|
||||
clientRandom, serverRandom string
|
||||
masterSecret string
|
||||
clientMAC, serverMAC string
|
||||
clientKey, serverKey string
|
||||
macLen, keyLen int
|
||||
}
|
||||
|
||||
func TestKeysFromPreMasterSecret(t *testing.T) {
|
||||
for i, test := range testKeysFromTests {
|
||||
in, _ := hex.DecodeString(test.preMasterSecret)
|
||||
clientRandom, _ := hex.DecodeString(test.clientRandom)
|
||||
serverRandom, _ := hex.DecodeString(test.serverRandom)
|
||||
|
||||
masterSecret := masterFromPreMasterSecret(test.version, in, clientRandom, serverRandom)
|
||||
if s := hex.EncodeToString(masterSecret); s != test.masterSecret {
|
||||
t.Errorf("#%d: bad master secret %s, want %s", i, s, test.masterSecret)
|
||||
continue
|
||||
}
|
||||
|
||||
clientMAC, serverMAC, clientKey, serverKey, _, _ := keysFromMasterSecret(test.version, masterSecret, clientRandom, serverRandom, test.macLen, test.keyLen, 0)
|
||||
clientMACString := hex.EncodeToString(clientMAC)
|
||||
serverMACString := hex.EncodeToString(serverMAC)
|
||||
clientKeyString := hex.EncodeToString(clientKey)
|
||||
serverKeyString := hex.EncodeToString(serverKey)
|
||||
if clientMACString != test.clientMAC ||
|
||||
serverMACString != test.serverMAC ||
|
||||
clientKeyString != test.clientKey ||
|
||||
serverKeyString != test.serverKey {
|
||||
t.Errorf("#%d: got: (%s, %s, %s, %s) want: (%s, %s, %s, %s)", i, clientMACString, serverMACString, clientKeyString, serverKeyString, test.clientMAC, test.serverMAC, test.clientKey, test.serverKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// These test vectors were generated from GnuTLS using `gnutls-cli --insecure -d 9 `
|
||||
var testKeysFromTests = []testKeysFromTest{
|
||||
{
|
||||
VersionTLS10,
|
||||
"0302cac83ad4b1db3b9ab49ad05957de2a504a634a386fc600889321e1a971f57479466830ac3e6f468e87f5385fa0c5",
|
||||
"4ae66303755184a3917fcb44880605fcc53baa01912b22ed94473fc69cebd558",
|
||||
"4ae663020ec16e6bb5130be918cfcafd4d765979a3136a5d50c593446e4e44db",
|
||||
"3d851bab6e5556e959a16bc36d66cfae32f672bfa9ecdef6096cbb1b23472df1da63dbbd9827606413221d149ed08ceb",
|
||||
"805aaa19b3d2c0a0759a4b6c9959890e08480119",
|
||||
"2d22f9fe519c075c16448305ceee209fc24ad109",
|
||||
"d50b5771244f850cd8117a9ccafe2cf1",
|
||||
"e076e33206b30507a85c32855acd0919",
|
||||
20,
|
||||
16,
|
||||
},
|
||||
{
|
||||
VersionTLS10,
|
||||
"03023f7527316bc12cbcd69e4b9e8275d62c028f27e65c745cfcddc7ce01bd3570a111378b63848127f1c36e5f9e4890",
|
||||
"4ae66364b5ea56b20ce4e25555aed2d7e67f42788dd03f3fee4adae0459ab106",
|
||||
"4ae66363ab815cbf6a248b87d6b556184e945e9b97fbdf247858b0bdafacfa1c",
|
||||
"7d64be7c80c59b740200b4b9c26d0baaa1c5ae56705acbcf2307fe62beb4728c19392c83f20483801cce022c77645460",
|
||||
"97742ed60a0554ca13f04f97ee193177b971e3b0",
|
||||
"37068751700400e03a8477a5c7eec0813ab9e0dc",
|
||||
"207cddbc600d2a200abac6502053ee5c",
|
||||
"df3f94f6e1eacc753b815fe16055cd43",
|
||||
20,
|
||||
16,
|
||||
},
|
||||
{
|
||||
VersionTLS10,
|
||||
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1",
|
||||
"4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e",
|
||||
"4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e",
|
||||
"1aff2e7a2c4279d0126f57a65a77a8d9d0087cf2733366699bec27eb53d5740705a8574bb1acc2abbe90e44f0dd28d6c",
|
||||
"3c7647c93c1379a31a609542aa44e7f117a70085",
|
||||
"0d73102994be74a575a3ead8532590ca32a526d4",
|
||||
"ac7581b0b6c10d85bbd905ffbf36c65e",
|
||||
"ff07edde49682b45466bd2e39464b306",
|
||||
20,
|
||||
16,
|
||||
},
|
||||
{
|
||||
VersionSSL30,
|
||||
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1",
|
||||
"4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e",
|
||||
"4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e",
|
||||
"a614863e56299dcffeea2938f22c2ba023768dbe4b3f6877bc9c346c6ae529b51d9cb87ff9695ea4d01f2205584405b2",
|
||||
"2c450d5b6f6e2013ac6bea6a0b32200d4e1ffb94",
|
||||
"7a7a7438769536f2fb1ae49a61f0703b79b2dc53",
|
||||
"f8f6b26c10f12855c9aafb1e0e839ccf",
|
||||
"2b9d4b4a60cb7f396780ebff50650419",
|
||||
20,
|
||||
16,
|
||||
},
|
||||
}
|
|
@ -1,107 +0,0 @@
[deleted vendored Go test file (package tls, 107 lines): RSA and ECDSA certificate and key PEM fixtures (rsaCertPEM, rsaKeyPEM, keyPEM, ecdsaCertPEM, ecdsaKeyPEM) plus TestX509KeyPair and TestX509MixedKeyPair]
@ -1,30 +0,0 @@
[deleted vendored test file (package management_test, 30 lines): TestIsResourceNotFoundError, a table test of management.IsResourceNotFoundError over AzureError codes]
31 vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go generated vendored
@ -1,31 +0,0 @@
[deleted test file (package storageservice, 31 lines): Test_StorageServiceKeysResponse_Unmarshal, unmarshalling a sample StorageService XML response and checking the primary and secondary keys]
299 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go generated vendored
@ -1,299 +0,0 @@
[deleted test file (package virtualmachine, 299 lines): TestDocumentedDeploymentRequest, unmarshalling the documented Azure Deployment XML (roles, Windows/Linux/network configuration sets, WinRM listeners, endpoints, resource extensions, disks, DNS servers and load balancers) into DeploymentRequest and asserting selected fields]
@ -1,27 +0,0 @@
[deleted test file (package virtualmachine, 27 lines): TestAzureGetResourceExtensions, listing resource extensions with a test client and checking that none have an empty name]
110 vendor/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go generated vendored
@ -1,110 +0,0 @@
[deleted test file (package virtualmachineimage, 110 lines): Test_NoDataDisksUnmarshal and Test_DataDiskCountUnmarshal, unmarshalling sample VMImage XML with zero and two DataDiskConfiguration entries]
@ -1,53 +0,0 @@
[deleted example file (package vmutils, 53 lines): Example() creating a hosted service and deploying a small Linux VM from a platform image with public SSH enabled]
42 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go generated vendored
@ -1,42 +0,0 @@
[deleted test file (package vmutils, 42 lines): Test_AddAzureVMExtensionConfiguration, asserting the marshalled Role XML for an added resource extension reference]
455 vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go generated vendored
@ -1,455 +0,0 @@
[deleted integration test file (package vmutils, 455 lines): deploying platform Linux and Windows images, capturing and redeploying OS and VM images, role state operations, and test helpers such as GetTestStorageAccount, GetOSImage, GetVMImage, GenerateName, GeneratePassword and Await]
@ -1,440 +0,0 @@
[deleted test file (package vmutils, 440 lines): TestNewLinuxVmRemoteImage, TestNewLinuxVmPlatformImage, TestNewVmFromVMImage, TestNewVmFromExistingDisk and the WinRM over HTTP/HTTPS configuration tests, each asserting the marshalled Role XML]
@@ -1,715 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StorageBlobSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageBlobSuite{})
|
||||
|
||||
const testContainerPrefix = "zzzztest-"
|
||||
|
||||
func getBlobClient(c *chk.C) BlobStorageClient {
|
||||
return getBasicClient(c).GetBlobService()
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) {
|
||||
c.Assert(pathForContainer("foo"), chk.Equals, "/foo")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) {
|
||||
c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) {
|
||||
_, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP")
|
||||
c.Assert(err, chk.NotNil) // SAS is not implemented for API versions earlier than 2013-08-15
|
||||
|
||||
out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) {
|
||||
api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true)
|
||||
c.Assert(err, chk.IsNil)
|
||||
cli := api.GetBlobService()
|
||||
expiry := time.Time{}
|
||||
|
||||
expectedParts := url.URL{
|
||||
Scheme: "https",
|
||||
Host: "foo.blob.core.windows.net",
|
||||
Path: "container/name",
|
||||
RawQuery: url.Values{
|
||||
"sv": {"2013-08-15"},
|
||||
"sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="},
|
||||
"sr": {"b"},
|
||||
"sp": {"r"},
|
||||
"se": {"0001-01-01T00:00:00Z"},
|
||||
}.Encode()}
|
||||
|
||||
u, err := cli.GetBlobSASURI("container", "name", expiry, "r")
|
||||
c.Assert(err, chk.IsNil)
|
||||
sasParts, err := url.Parse(u)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(expectedParts.String(), chk.Equals, sasParts.String())
|
||||
c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query())
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
body := []byte(randString(100))
|
||||
expiry := time.Now().UTC().Add(time.Hour)
|
||||
permissions := "r"
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil)
|
||||
|
||||
sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
resp, err := http.Get(sasURI)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
blobResp, err := ioutil.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(resp.StatusCode, chk.Equals, http.StatusOK)
|
||||
c.Assert(len(blobResp), chk.Equals, len(body))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(deleteTestContainers(cli), chk.IsNil)
|
||||
|
||||
const n = 5
|
||||
const pageSize = 2
|
||||
|
||||
// Create test containers
|
||||
created := []string{}
|
||||
for i := 0; i < n; i++ {
|
||||
name := randContainer()
|
||||
c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil)
|
||||
created = append(created, name)
|
||||
}
|
||||
sort.Strings(created)
|
||||
|
||||
// Defer test container deletions
|
||||
defer func() {
|
||||
var wg sync.WaitGroup
|
||||
for _, cnt := range created {
|
||||
wg.Add(1)
|
||||
go func(name string) {
|
||||
c.Assert(cli.DeleteContainer(name), chk.IsNil)
|
||||
wg.Done()
|
||||
}(cnt)
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
// Paginate results
|
||||
seen := []string{}
|
||||
marker := ""
|
||||
for {
|
||||
resp, err := cli.ListContainers(ListContainersParameters{
|
||||
Prefix: testContainerPrefix,
|
||||
MaxResults: pageSize,
|
||||
Marker: marker})
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
containers := resp.Containers
|
||||
if len(containers) > pageSize {
|
||||
c.Fatalf("Got a bigger page. Expected: %d, got: %d", pageSize, len(containers))
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
seen = append(seen, c.Name)
|
||||
}
|
||||
|
||||
marker = resp.NextMarker
|
||||
if marker == "" || len(containers) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
c.Assert(seen, chk.DeepEquals, created)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestContainerExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
ok, err := cli.ContainerExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
ok, err = cli.ContainerExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
c.Assert(cli.DeleteContainer(cnt), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
// First create
|
||||
ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
|
||||
// Second create, should not give errors
|
||||
ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
cli := getBlobClient(c)
|
||||
|
||||
// Nonexistent container
|
||||
c.Assert(cli.DeleteContainer(cnt), chk.NotNil)
|
||||
|
||||
ok, err := cli.DeleteContainerIfExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
// Existing container
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
ok, err = cli.DeleteContainerIfExists(cnt)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
cli := getBlobClient(c)
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, blob)
|
||||
|
||||
ok, err := cli.BlobExists(cnt, blob+".foo")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
ok, err = cli.BlobExists(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) {
|
||||
api, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
cli := api.GetBlobService()
|
||||
|
||||
c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob")
|
||||
c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob")
|
||||
c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob")
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) {
|
||||
if testing.Short() {
|
||||
c.Skip("skipping blob copy in short mode, no SLA on async operation")
|
||||
}
|
||||
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
src := randString(20)
|
||||
dst := randString(20)
|
||||
body := []byte(randString(1024))
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, src)
|
||||
|
||||
c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, dst)
|
||||
|
||||
blobBody, err := cli.GetBlob(cnt, dst)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
b, err := ioutil.ReadAll(blobBody)
|
||||
defer blobBody.Close()
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(b, chk.DeepEquals, body)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil)
|
||||
|
||||
ok, err := cli.DeleteBlobIfExists(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
contents := randString(64)
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
// Nonexistent blob
|
||||
_, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.NotNil)
|
||||
|
||||
// Put the blob
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil)
|
||||
|
||||
// Get blob properties
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(props.ContentLength, chk.Equals, int64(len(contents)))
|
||||
c.Assert(props.BlobType, chk.Equals, BlobTypeBlock)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
blobs := []string{}
|
||||
const n = 5
|
||||
const pageSize = 2
|
||||
for i := 0; i < n; i++ {
|
||||
name := randString(20)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil)
|
||||
blobs = append(blobs, name)
|
||||
}
|
||||
sort.Strings(blobs)
|
||||
|
||||
// Paginate
|
||||
seen := []string{}
|
||||
marker := ""
|
||||
for {
|
||||
resp, err := cli.ListBlobs(cnt, ListBlobsParameters{
|
||||
MaxResults: pageSize,
|
||||
Marker: marker})
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
for _, v := range resp.Blobs {
|
||||
seen = append(seen, v.Name)
|
||||
}
|
||||
|
||||
marker = resp.NextMarker
|
||||
if marker == "" || len(resp.Blobs) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Compare
|
||||
c.Assert(seen, chk.DeepEquals, blobs)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)
|
||||
|
||||
m, err := cli.GetBlobMetadata(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(m, chk.Not(chk.Equals), nil)
|
||||
c.Assert(len(m), chk.Equals, 0)
|
||||
|
||||
mPut := map[string]string{
|
||||
"foo": "bar",
|
||||
"bar_baz": "waz qux",
|
||||
}
|
||||
|
||||
err = cli.SetBlobMetadata(cnt, blob, mPut)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
m, err = cli.GetBlobMetadata(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Check(m, chk.DeepEquals, mPut)
|
||||
|
||||
// Case munging: the service returns metadata keys lowercased
|
||||
|
||||
mPutUpper := map[string]string{
|
||||
"Foo": "different bar",
|
||||
"bar_BAZ": "different waz qux",
|
||||
}
|
||||
mExpectLower := map[string]string{
|
||||
"foo": "different bar",
|
||||
"bar_baz": "different waz qux",
|
||||
}
|
||||
|
||||
err = cli.SetBlobMetadata(cnt, blob, mPutUpper)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
m, err = cli.GetBlobMetadata(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Check(m, chk.DeepEquals, mExpectLower)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)
|
||||
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(props.ContentLength, chk.Not(chk.Equals), 0)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) {
|
||||
cnt := randContainer()
|
||||
blob := randString(20)
|
||||
body := "0123456789"
|
||||
|
||||
cli := getBlobClient(c)
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
|
||||
defer cli.DeleteContainer(cnt)
|
||||
|
||||
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil)
|
||||
defer cli.DeleteBlob(cnt, blob)
|
||||
|
||||
// Read a few different byte ranges
|
||||
for _, r := range []struct {
|
||||
rangeStr string
|
||||
expected string
|
||||
}{
|
||||
{"0-", body},
|
||||
{"1-3", body[1 : 3+1]},
|
||||
{"3-", body[3:]},
|
||||
} {
|
||||
resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr)
|
||||
c.Assert(err, chk.IsNil)
|
||||
blobBody, err := ioutil.ReadAll(resp)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
str := string(blobBody)
|
||||
c.Assert(str, chk.Equals, r.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
name := randString(20)
|
||||
data := randBytes(8888)
|
||||
c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil)
|
||||
|
||||
body, err := cli.GetBlob(cnt, name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
gotData, err := ioutil.ReadAll(body)
|
||||
body.Close()
|
||||
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(gotData, chk.DeepEquals, data)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
name := randString(20)
|
||||
data := randBytes(8888)
|
||||
err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil)
|
||||
c.Assert(err, chk.Not(chk.IsNil))
|
||||
|
||||
_, err = cli.GetBlob(cnt, name)
|
||||
// Upload was incomplete: blob should not have been created.
|
||||
c.Assert(err, chk.Not(chk.IsNil))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutBlock(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
chunk := []byte(randString(1024))
|
||||
blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
|
||||
c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
chunk := []byte(randString(1024))
|
||||
blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
|
||||
|
||||
// Put one block
|
||||
c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
|
||||
defer cli.deleteBlob(cnt, blob)
|
||||
|
||||
// Get committed blocks
|
||||
committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
if len(committed.CommittedBlocks) > 0 {
|
||||
c.Fatal("There are committed blocks")
|
||||
}
|
||||
|
||||
// Get uncommitted blocks
|
||||
uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1)
|
||||
// Commit block list
|
||||
c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil)
|
||||
|
||||
// Get all blocks
|
||||
all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(all.CommittedBlocks), chk.Equals, 1)
|
||||
c.Assert(len(all.UncommittedBlocks), chk.Equals, 0)
|
||||
|
||||
// Verify the block
|
||||
thatBlock := all.CommittedBlocks[0]
|
||||
c.Assert(thatBlock.Name, chk.Equals, blockID)
|
||||
c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk)))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil)
|
||||
|
||||
// Verify
|
||||
blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0)
|
||||
c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024)
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil)
|
||||
|
||||
// Verify
|
||||
props, err := cli.GetBlobProperties(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(props.ContentLength, chk.Equals, size)
|
||||
c.Assert(props.BlobType, chk.Equals, BlobTypePage)
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil)
|
||||
|
||||
chunk1 := []byte(randString(1024))
|
||||
chunk2 := []byte(randString(512))
|
||||
|
||||
// Append chunks
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil)
|
||||
c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
blobContents, err := ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...))
|
||||
out.Close()
|
||||
|
||||
// Overwrite first half of chunk1
|
||||
chunk0 := []byte(randString(512))
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
blobContents, err = ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil)
|
||||
|
||||
// Put 0-2047
|
||||
chunk := []byte(randString(2048))
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil)
|
||||
|
||||
// Clear 512-1023
|
||||
c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil)
|
||||
|
||||
// Verify contents
|
||||
out, err := cli.GetBlobRange(cnt, blob, "0-2047")
|
||||
c.Assert(err, chk.IsNil)
|
||||
contents, err := ioutil.ReadAll(out)
|
||||
c.Assert(err, chk.IsNil)
|
||||
defer out.Close()
|
||||
c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...))
|
||||
}
|
||||
|
||||
func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) {
|
||||
cli := getBlobClient(c)
|
||||
cnt := randContainer()
|
||||
c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
|
||||
defer cli.deleteContainer(cnt)
|
||||
|
||||
blob := randString(20)
|
||||
size := int64(10 * 1024 * 1024) // larger than we'll use
|
||||
c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil)
|
||||
|
||||
// Get page ranges on empty blob
|
||||
out, err := cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 0)
|
||||
|
||||
// Add 0-512 page
|
||||
c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil)
|
||||
|
||||
out, err = cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 1)
|
||||
|
||||
// Add 1024-2048
|
||||
c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil)
|
||||
|
||||
out, err = cli.GetPageRanges(cnt, blob)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(out.PageList), chk.Equals, 2)
|
||||
}
|
||||
|
||||
// deleteTestContainers removes every container whose name starts with testContainerPrefix, paging until none remain.
func deleteTestContainers(cli BlobStorageClient) error {
|
||||
for {
|
||||
resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(resp.Containers) == 0 {
|
||||
break
|
||||
}
|
||||
for _, c := range resp.Containers {
|
||||
err = cli.DeleteContainer(c.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// putSingleBlockBlob is a test helper that uploads chunk as a block blob in a single Put Blob request; chunk must fit in one block.
func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error {
|
||||
if len(chunk) > MaxBlobBlockSize {
|
||||
return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize)
|
||||
}
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
func randContainer() string {
|
||||
return testContainerPrefix + randString(32-len(testContainerPrefix))
|
||||
}
|
||||
|
||||
// randString returns a random string of length n drawn from lowercase letters and digits.
func randString(n int) string {
|
||||
if n <= 0 {
|
||||
panic("negative number")
|
||||
}
|
||||
const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
|
||||
var bytes = make([]byte, n)
|
||||
rand.Read(bytes)
|
||||
for i, b := range bytes {
|
||||
bytes[i] = alphanum[b%byte(len(alphanum))]
|
||||
}
|
||||
return string(bytes)
|
||||
}
|
||||
|
||||
func randBytes(n int) []byte {
|
||||
data := make([]byte, n)
|
||||
if _, err := io.ReadFull(rand.Reader, data); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return data
|
||||
}
|
|
@@ -1,156 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// Hook up gocheck to testing
|
||||
func Test(t *testing.T) { chk.TestingT(t) }
|
||||
|
||||
type StorageClientSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageClientSuite{})
|
||||
|
||||
// getBasicClient returns a test client from storage credentials in the env
|
||||
func getBasicClient(c *chk.C) Client {
|
||||
name := os.Getenv("ACCOUNT_NAME")
|
||||
if name == "" {
|
||||
c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test")
|
||||
}
|
||||
key := os.Getenv("ACCOUNT_KEY")
|
||||
if key == "" {
|
||||
c.Fatal("ACCOUNT_KEY not set")
|
||||
}
|
||||
cli, err := NewBasicClient(name, key)
|
||||
c.Assert(err, chk.IsNil)
|
||||
return cli
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion)
|
||||
c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) {
|
||||
apiVersion := "2015-01-01" // a non existing one
|
||||
cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(cli.apiVersion, chk.Equals, apiVersion)
|
||||
c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
output := cli.getEndpoint(blobServiceName, "", url.Values{})
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
output := cli.getEndpoint(blobServiceName, "path", url.Values{})
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
params := url.Values{}
|
||||
params.Set("a", "b")
|
||||
params.Set("c", "d")
|
||||
output := cli.getEndpoint(blobServiceName, "", params)
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
params := url.Values{}
|
||||
params.Set("a", "b")
|
||||
params.Set("c", "d")
|
||||
output := cli.getEndpoint(blobServiceName, "path", params)
|
||||
c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
headers := cli.getStandardHeaders()
|
||||
c.Assert(len(headers), chk.Equals, 2)
|
||||
c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion)
|
||||
if _, ok := headers["x-ms-date"]; !ok {
|
||||
c.Fatal("Missing date header")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
type test struct{ url, expected string }
|
||||
tests := []test{
|
||||
{"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"},
|
||||
{"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"},
|
||||
{"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"},
|
||||
}
|
||||
|
||||
for _, i := range tests {
|
||||
out, err := cli.buildCanonicalizedResource(i.url)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(out, chk.Equals, i.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) {
|
||||
cli, err := NewBasicClient("foo", "YmFy")
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
type test struct {
|
||||
headers map[string]string
|
||||
expected string
|
||||
}
|
||||
tests := []test{
|
||||
{map[string]string{}, ""},
|
||||
{map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"},
|
||||
{map[string]string{"foo:": "bar"}, ""},
|
||||
{map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"},
|
||||
{map[string]string{
|
||||
"x-ms-version": "9999-99-99",
|
||||
"x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}}
|
||||
|
||||
for _, i := range tests {
|
||||
c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) {
|
||||
// attempt to delete a nonexistent container
|
||||
_, err := getBlobClient(c).deleteContainer(randContainer())
|
||||
c.Assert(err, chk.NotNil)
|
||||
|
||||
v, ok := err.(AzureStorageServiceError)
|
||||
c.Check(ok, chk.Equals, true)
|
||||
c.Assert(v.StatusCode, chk.Equals, 404)
|
||||
c.Assert(v.Code, chk.Equals, "ContainerNotFound")
|
||||
c.Assert(v.Code, chk.Not(chk.Equals), "")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) {
|
||||
key := base64.StdEncoding.EncodeToString([]byte("bar"))
|
||||
cli, err := NewBasicClient("foo", key)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
canonicalizedString := `foobarzoo`
|
||||
expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`
|
||||
c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected)
|
||||
}
|
|
@@ -1,63 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StorageFileSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageFileSuite{})
|
||||
|
||||
func getFileClient(c *chk.C) FileServiceClient {
|
||||
return getBasicClient(c).GetFileService()
|
||||
}
|
||||
|
||||
func (s *StorageFileSuite) Test_pathForFileShare(c *chk.C) {
|
||||
c.Assert(pathForFileShare("foo"), chk.Equals, "/foo")
|
||||
}
|
||||
|
||||
func (s *StorageFileSuite) TestCreateShareDeleteShare(c *chk.C) {
|
||||
cli := getFileClient(c)
|
||||
name := randShare()
|
||||
c.Assert(cli.CreateShare(name), chk.IsNil)
|
||||
c.Assert(cli.DeleteShare(name), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) {
|
||||
cli := getFileClient(c)
|
||||
name := randShare()
|
||||
defer cli.DeleteShare(name)
|
||||
|
||||
// First create
|
||||
ok, err := cli.CreateShareIfNotExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
|
||||
// Second create, should not give errors
|
||||
ok, err = cli.CreateShareIfNotExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
}
|
||||
|
||||
func (s *StorageFileSuite) TestDeleteShareIfNotExists(c *chk.C) {
|
||||
cli := getFileClient(c)
|
||||
name := randShare()
|
||||
|
||||
// delete a nonexistent share
|
||||
ok, err := cli.DeleteShareIfExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
c.Assert(cli.CreateShare(name), chk.IsNil)
|
||||
|
||||
// delete existing share
|
||||
ok, err = cli.DeleteShareIfExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
const testSharePrefix = "zzzzztest"
|
||||
|
||||
func randShare() string {
|
||||
return testSharePrefix + randString(32-len(testSharePrefix))
|
||||
}
|
|
@@ -1,132 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
type StorageQueueSuite struct{}
|
||||
|
||||
var _ = chk.Suite(&StorageQueueSuite{})
|
||||
|
||||
func getQueueClient(c *chk.C) QueueServiceClient {
|
||||
return getBasicClient(c).GetQueueService()
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) {
|
||||
c.Assert(pathForQueue("q"), chk.Equals, "/q")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) {
|
||||
c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) {
|
||||
c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
c.Assert(cli.DeleteQueue(name), chk.IsNil)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
defer cli.DeleteQueue(name)
|
||||
|
||||
qm, err := cli.GetMetadata(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(qm.ApproximateMessageCount, chk.Equals, 0)
|
||||
|
||||
for ix := 0; ix < 3; ix++ {
|
||||
err = cli.PutMessage(name, "foobar", PutMessageParameters{})
|
||||
c.Assert(err, chk.IsNil)
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
qm, err = cli.GetMetadata(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(qm.ApproximateMessageCount, chk.Equals, 3)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
defer cli.DeleteQueue(name)
|
||||
|
||||
metadata := make(map[string]string)
|
||||
metadata["Foo1"] = "bar1"
|
||||
metadata["fooBaz"] = "bar"
|
||||
err := cli.SetMetadata(name, metadata)
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
qm, err := cli.GetMetadata(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1")
|
||||
c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar")
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
|
||||
cli := getQueueClient(c)
|
||||
ok, err := cli.QueueExists("nonexistent-queue")
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, false)
|
||||
|
||||
name := randString(20)
|
||||
c.Assert(cli.CreateQueue(name), chk.IsNil)
|
||||
defer cli.DeleteQueue(name)
|
||||
|
||||
ok, err = cli.QueueExists(name)
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(ok, chk.Equals, true)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
msg := randString(64 * 1024) // exercise max length
|
||||
c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil)
|
||||
r, err := cli.PeekMessages(q, PeekMessagesParameters{})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
|
||||
c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestGetMessages(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
n := 4
|
||||
for i := 0; i < n; i++ {
|
||||
c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil)
|
||||
}
|
||||
|
||||
r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, n)
|
||||
}
|
||||
|
||||
func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) {
|
||||
q := randString(20)
|
||||
cli := getQueueClient(c)
|
||||
c.Assert(cli.CreateQueue(q), chk.IsNil)
|
||||
defer cli.DeleteQueue(q)
|
||||
|
||||
c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil)
|
||||
r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1})
|
||||
c.Assert(err, chk.IsNil)
|
||||
c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
|
||||
m := r.QueueMessagesList[0]
|
||||
c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil)
|
||||
}
|
|
@@ -1,69 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) {
|
||||
now := time.Now().UTC()
|
||||
expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout))
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_mergeParams(c *chk.C) {
|
||||
v1 := url.Values{
|
||||
"k1": {"v1"},
|
||||
"k2": {"v2"}}
|
||||
v2 := url.Values{
|
||||
"k1": {"v11"},
|
||||
"k3": {"v3"}}
|
||||
out := mergeParams(v1, v2)
|
||||
c.Assert(out.Get("k1"), chk.Equals, "v1")
|
||||
c.Assert(out.Get("k2"), chk.Equals, "v2")
|
||||
c.Assert(out.Get("k3"), chk.Equals, "v3")
|
||||
c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"})
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) {
|
||||
empty := []Block{}
|
||||
expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>`
|
||||
c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected)
|
||||
|
||||
blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
|
||||
expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>`
|
||||
c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected)
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) {
|
||||
xml := `<?xml version="1.0" encoding="utf-8"?>
|
||||
<Blob>
|
||||
<Name>myblob</Name>
|
||||
</Blob>`
|
||||
var blob Blob
|
||||
body := ioutil.NopCloser(strings.NewReader(xml))
|
||||
c.Assert(xmlUnmarshal(body, &blob), chk.IsNil)
|
||||
c.Assert(blob.Name, chk.Equals, "myblob")
|
||||
}
|
||||
|
||||
func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) {
|
||||
type t struct {
|
||||
XMLName xml.Name `xml:"S"`
|
||||
Name string `xml:"Name"`
|
||||
}
|
||||
|
||||
b := t{Name: "myblob"}
|
||||
expected := `<S><Name>myblob</Name></S>`
|
||||
r, i, err := xmlMarshal(b)
|
||||
c.Assert(err, chk.IsNil)
|
||||
o, err := ioutil.ReadAll(r)
|
||||
c.Assert(err, chk.IsNil)
|
||||
out := string(o)
|
||||
c.Assert(out, chk.Equals, expected)
|
||||
c.Assert(i, chk.Equals, len(expected))
|
||||
}
|
|
@@ -1,236 +0,0 @@
|
|||
package statuscake
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAuth_validate(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
auth := &Auth{}
|
||||
err := auth.validate()
|
||||
|
||||
require.NotNil(err)
|
||||
assert.Contains(err.Error(), "Username is required")
|
||||
assert.Contains(err.Error(), "Apikey is required")
|
||||
|
||||
auth.Username = "foo"
|
||||
err = auth.validate()
|
||||
|
||||
require.NotNil(err)
|
||||
assert.Equal("Apikey is required", err.Error())
|
||||
|
||||
auth.Apikey = "bar"
|
||||
err = auth.validate()
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
assert.Equal("random-user", c.username)
|
||||
assert.Equal("my-pass", c.apiKey)
|
||||
}
|
||||
|
||||
func TestClient_newRequest(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
r, err := c.newRequest("GET", "/hello", nil, nil)
|
||||
|
||||
require.Nil(err)
|
||||
assert.Equal("GET", r.Method)
|
||||
assert.Equal("https://www.statuscake.com/API/hello", r.URL.String())
|
||||
assert.Equal("random-user", r.Header.Get("Username"))
|
||||
assert.Equal("my-pass", r.Header.Get("API"))
|
||||
}
|
||||
|
||||
func TestClient_doRequest(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{StatusCode: 200}
|
||||
c.c = hc
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/test", nil)
|
||||
require.Nil(err)
|
||||
|
||||
_, err = c.doRequest(req)
|
||||
require.Nil(err)
|
||||
|
||||
assert.Len(hc.requests, 1)
|
||||
assert.Equal("http://example.com/test", hc.requests[0].URL.String())
|
||||
}
|
||||
|
||||
func TestClient_doRequest_WithHTTPErrors(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{
|
||||
StatusCode: 500,
|
||||
}
|
||||
c.c = hc
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/test", nil)
|
||||
require.Nil(err)
|
||||
|
||||
_, err = c.doRequest(req)
|
||||
require.NotNil(err)
|
||||
assert.IsType(&httpError{}, err)
|
||||
}
|
||||
|
||||
func TestClient_doRequest_HttpAuthenticationErrors(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{
|
||||
StatusCode: 200,
|
||||
Fixture: "auth_error.json",
|
||||
}
|
||||
c.c = hc
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/test", nil)
|
||||
require.Nil(err)
|
||||
|
||||
_, err = c.doRequest(req)
|
||||
require.NotNil(err)
|
||||
assert.IsType(&AuthenticationError{}, err)
|
||||
}
|
||||
|
||||
func TestClient_get(t *testing.T) {
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{}
|
||||
c.c = hc
|
||||
|
||||
c.get("/hello", nil)
|
||||
assert.Len(hc.requests, 1)
|
||||
assert.Equal("GET", hc.requests[0].Method)
|
||||
assert.Equal("https://www.statuscake.com/API/hello", hc.requests[0].URL.String())
|
||||
}
|
||||
|
||||
func TestClient_put(t *testing.T) {
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{}
|
||||
c.c = hc
|
||||
|
||||
v := url.Values{"foo": {"bar"}}
|
||||
c.put("/hello", v)
|
||||
assert.Len(hc.requests, 1)
|
||||
assert.Equal("PUT", hc.requests[0].Method)
|
||||
assert.Equal("https://www.statuscake.com/API/hello", hc.requests[0].URL.String())
|
||||
|
||||
b, err := ioutil.ReadAll(hc.requests[0].Body)
|
||||
require.Nil(err)
|
||||
assert.Equal("foo=bar", string(b))
|
||||
}
|
||||
|
||||
func TestClient_delete(t *testing.T) {
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
hc := &fakeHTTPClient{}
|
||||
c.c = hc
|
||||
|
||||
v := url.Values{"foo": {"bar"}}
|
||||
c.delete("/hello", v)
|
||||
assert.Len(hc.requests, 1)
|
||||
assert.Equal("DELETE", hc.requests[0].Method)
|
||||
assert.Equal("https://www.statuscake.com/API/hello?foo=bar", hc.requests[0].URL.String())
|
||||
}
|
||||
|
||||
func TestClient_Tests(t *testing.T) {
|
||||
require := require.New(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, err := New(Auth{Username: "random-user", Apikey: "my-pass"})
|
||||
require.Nil(err)
|
||||
|
||||
expected := &tests{
|
||||
client: c,
|
||||
}
|
||||
|
||||
assert.Equal(expected, c.Tests())
|
||||
}
|
||||
|
||||
type fakeBody struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (f *fakeBody) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeHTTPClient struct {
|
||||
StatusCode int
|
||||
Fixture string
|
||||
requests []*http.Request
|
||||
}
|
||||
|
||||
func (c *fakeHTTPClient) Do(r *http.Request) (*http.Response, error) {
|
||||
c.requests = append(c.requests, r)
|
||||
var body []byte
|
||||
|
||||
if c.Fixture != "" {
|
||||
p := filepath.Join("fixtures", c.Fixture)
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
body = b
|
||||
}
|
||||
|
||||
resp := &http.Response{
|
||||
StatusCode: c.StatusCode,
|
||||
Body: &fakeBody{Reader: bytes.NewReader(body)},
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
|
@@ -1,327 +0,0 @@
|
|||
package statuscake
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTest_Validate(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
test := &Test{
|
||||
Timeout: 200,
|
||||
Confirmation: 100,
|
||||
Public: 200,
|
||||
Virus: 200,
|
||||
TestType: "FTP",
|
||||
RealBrowser: 100,
|
||||
TriggerRate: 100,
|
||||
CheckRate: 100000,
|
||||
WebsiteName: "",
|
||||
WebsiteURL: "",
|
||||
}
|
||||
|
||||
err := test.Validate()
|
||||
require.NotNil(err)
|
||||
|
||||
message := err.Error()
|
||||
assert.Contains(message, "WebsiteName is required")
|
||||
assert.Contains(message, "WebsiteURL is required")
|
||||
assert.Contains(message, "Timeout must be 0 or between 6 and 99")
|
||||
assert.Contains(message, "Confirmation must be between 0 and 9")
|
||||
assert.Contains(message, "CheckRate must be between 0 and 23999")
|
||||
assert.Contains(message, "Public must be 0 or 1")
|
||||
assert.Contains(message, "Virus must be 0 or 1")
|
||||
assert.Contains(message, "TestType must be HTTP, TCP, or PING")
|
||||
assert.Contains(message, "RealBrowser must be 0 or 1")
|
||||
assert.Contains(message, "TriggerRate must be between 0 and 59")
|
||||
|
||||
test.Timeout = 10
|
||||
test.Confirmation = 2
|
||||
test.Public = 1
|
||||
test.Virus = 1
|
||||
test.TestType = "HTTP"
|
||||
test.RealBrowser = 1
|
||||
test.TriggerRate = 50
|
||||
test.CheckRate = 10
|
||||
test.WebsiteName = "Foo"
|
||||
test.WebsiteURL = "http://example.com"
|
||||
|
||||
err = test.Validate()
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestTest_ToURLValues(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
test := &Test{
|
||||
TestID: 123,
|
||||
Paused: true,
|
||||
WebsiteName: "Foo Bar",
|
||||
WebsiteURL: "http://example.com",
|
||||
Port: 3000,
|
||||
NodeLocations: []string{"foo", "bar"},
|
||||
Timeout: 11,
|
||||
PingURL: "http://example.com/ping",
|
||||
Confirmation: 1,
|
||||
CheckRate: 500,
|
||||
BasicUser: "myuser",
|
||||
BasicPass: "mypass",
|
||||
Public: 1,
|
||||
LogoImage: "http://example.com/logo.jpg",
|
||||
Branding: 1,
|
||||
Virus: 1,
|
||||
FindString: "hello",
|
||||
DoNotFind: true,
|
||||
TestType: "HTTP",
|
||||
RealBrowser: 1,
|
||||
TriggerRate: 50,
|
||||
TestTags: "tag1,tag2",
|
||||
StatusCodes: "500",
|
||||
}
|
||||
|
||||
expected := url.Values{
|
||||
"TestID": {"123"},
|
||||
"Paused": {"1"},
|
||||
"WebsiteName": {"Foo Bar"},
|
||||
"WebsiteURL": {"http://example.com"},
|
||||
"Port": {"3000"},
|
||||
"NodeLocations": {"foo,bar"},
|
||||
"Timeout": {"11"},
|
||||
"PingURL": {"http://example.com/ping"},
|
||||
"Confirmation": {"1"},
|
||||
"CheckRate": {"500"},
|
||||
"BasicUser": {"myuser"},
|
||||
"BasicPass": {"mypass"},
|
||||
"Public": {"1"},
|
||||
"LogoImage": {"http://example.com/logo.jpg"},
|
||||
"Branding": {"1"},
|
||||
"Virus": {"1"},
|
||||
"FindString": {"hello"},
|
||||
"DoNotFind": {"1"},
|
||||
"TestType": {"HTTP"},
|
||||
"RealBrowser": {"1"},
|
||||
"TriggerRate": {"50"},
|
||||
"TestTags": {"tag1,tag2"},
|
||||
"StatusCodes": {"500"},
|
||||
}
|
||||
|
||||
assert.Equal(expected, test.ToURLValues())
|
||||
|
||||
test.TestID = 0
|
||||
delete(expected, "TestID")
|
||||
|
||||
assert.Equal(expected.Encode(), test.ToURLValues().Encode())
|
||||
}
|
||||
|
||||
func TestTests_All(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_all_ok.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
tests, err := tt.All()
|
||||
require.Nil(err)
|
||||
|
||||
assert.Equal("/Tests", c.sentRequestPath)
|
||||
assert.Equal("GET", c.sentRequestMethod)
|
||||
assert.Nil(c.sentRequestValues)
|
||||
assert.Len(tests, 2)
|
||||
|
||||
expectedTest := &Test{
|
||||
TestID: 100,
|
||||
Paused: false,
|
||||
TestType: "HTTP",
|
||||
WebsiteName: "www 1",
|
||||
ContactID: 1,
|
||||
Status: "Up",
|
||||
Uptime: 100,
|
||||
}
|
||||
assert.Equal(expectedTest, tests[0])
|
||||
|
||||
expectedTest = &Test{
|
||||
TestID: 101,
|
||||
Paused: true,
|
||||
TestType: "HTTP",
|
||||
WebsiteName: "www 2",
|
||||
ContactID: 2,
|
||||
Status: "Down",
|
||||
Uptime: 0,
|
||||
}
|
||||
assert.Equal(expectedTest, tests[1])
|
||||
}
|
||||
|
||||
func TestTests_Update_OK(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_update_ok.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
test1 := &Test{
|
||||
WebsiteName: "foo",
|
||||
}
|
||||
|
||||
test2, err := tt.Update(test1)
|
||||
require.Nil(err)
|
||||
|
||||
assert.Equal("/Tests/Update", c.sentRequestPath)
|
||||
assert.Equal("PUT", c.sentRequestMethod)
|
||||
assert.NotNil(c.sentRequestValues)
|
||||
assert.NotNil(test2)
|
||||
|
||||
assert.Equal(1234, test2.TestID)
|
||||
}
|
||||
|
||||
func TestTests_Update_Error(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_update_error.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
test1 := &Test{
|
||||
WebsiteName: "foo",
|
||||
}
|
||||
|
||||
test2, err := tt.Update(test1)
|
||||
assert.Nil(test2)
|
||||
|
||||
require.NotNil(err)
|
||||
assert.IsType(&updateError{}, err)
|
||||
assert.Contains(err.Error(), "issue a")
|
||||
}
|
||||
|
||||
func TestTests_Update_ErrorWithSliceOfIssues(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_update_error_slice_of_issues.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
test1 := &Test{
|
||||
WebsiteName: "foo",
|
||||
}
|
||||
|
||||
test2, err := tt.Update(test1)
|
||||
assert.Nil(test2)
|
||||
|
||||
require.NotNil(err)
|
||||
assert.IsType(&updateError{}, err)
|
||||
assert.Equal("hello, world", err.Error())
|
||||
}
|
||||
|
||||
func TestTests_Delete_OK(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_delete_ok.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
err := tt.Delete(1234)
|
||||
require.Nil(err)
|
||||
|
||||
assert.Equal("/Tests/Details", c.sentRequestPath)
|
||||
assert.Equal("DELETE", c.sentRequestMethod)
|
||||
assert.Equal(url.Values{"TestID": {"1234"}}, c.sentRequestValues)
|
||||
}
|
||||
|
||||
func TestTests_Delete_Error(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_delete_error.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
err := tt.Delete(1234)
|
||||
require.NotNil(err)
|
||||
assert.Equal("this is an error", err.Error())
|
||||
}
|
||||
|
||||
func TestTests_Detail_OK(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
c := &fakeAPIClient{
|
||||
fixture: "tests_detail_ok.json",
|
||||
}
|
||||
tt := newTests(c)
|
||||
|
||||
test, err := tt.Detail(1234)
|
||||
require.Nil(err)
|
||||
|
||||
assert.Equal("/Tests/Details", c.sentRequestPath)
|
||||
assert.Equal("GET", c.sentRequestMethod)
|
||||
assert.Equal(url.Values{"TestID": {"1234"}}, c.sentRequestValues)
|
||||
|
||||
assert.Equal(test.TestID, 6735)
|
||||
assert.Equal(test.TestType, "HTTP")
|
||||
assert.Equal(test.Paused, false)
|
||||
assert.Equal(test.WebsiteName, "NL")
|
||||
assert.Equal(test.ContactID, 536)
|
||||
assert.Equal(test.Status, "Up")
|
||||
assert.Equal(test.Uptime, 0.0)
|
||||
assert.Equal(test.CheckRate, 60)
|
||||
assert.Equal(test.Timeout, 40)
|
||||
assert.Equal(test.LogoImage, "")
|
||||
assert.Equal(test.WebsiteHost, "Various")
|
||||
assert.Equal(test.FindString, "")
|
||||
assert.Equal(test.DoNotFind, false)
|
||||
}
|
||||
|
||||
type fakeAPIClient struct {
|
||||
sentRequestPath string
|
||||
sentRequestMethod string
|
||||
sentRequestValues url.Values
|
||||
fixture string
|
||||
}
|
||||
|
||||
func (c *fakeAPIClient) put(path string, v url.Values) (*http.Response, error) {
|
||||
return c.all("PUT", path, v)
|
||||
}
|
||||
|
||||
func (c *fakeAPIClient) delete(path string, v url.Values) (*http.Response, error) {
|
||||
return c.all("DELETE", path, v)
|
||||
}
|
||||
|
||||
func (c *fakeAPIClient) get(path string, v url.Values) (*http.Response, error) {
|
||||
return c.all("GET", path, v)
|
||||
}
|
||||
|
||||
func (c *fakeAPIClient) all(method string, path string, v url.Values) (*http.Response, error) {
|
||||
c.sentRequestMethod = method
|
||||
c.sentRequestPath = path
|
||||
c.sentRequestValues = v
|
||||
|
||||
p := filepath.Join("fixtures", c.fixture)
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
resp := &http.Response{
|
||||
Body: f,
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
Copyright (c) 2015 Martin Atkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
|
@ -1,238 +0,0 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSubnet(t *testing.T) {
|
||||
type Case struct {
|
||||
Base string
|
||||
Bits int
|
||||
Num int
|
||||
Output string
|
||||
Error bool
|
||||
}
|
||||
|
||||
cases := []Case{
|
||||
Case{
|
||||
Base: "192.168.2.0/20",
|
||||
Bits: 4,
|
||||
Num: 6,
|
||||
Output: "192.168.6.0/24",
|
||||
},
|
||||
Case{
|
||||
Base: "192.168.2.0/20",
|
||||
Bits: 4,
|
||||
Num: 0,
|
||||
Output: "192.168.0.0/24",
|
||||
},
|
||||
Case{
|
||||
Base: "192.168.0.0/31",
|
||||
Bits: 1,
|
||||
Num: 1,
|
||||
Output: "192.168.0.1/32",
|
||||
},
|
||||
Case{
|
||||
Base: "192.168.0.0/21",
|
||||
Bits: 4,
|
||||
Num: 7,
|
||||
Output: "192.168.3.128/25",
|
||||
},
|
||||
Case{
|
||||
Base: "fe80::/48",
|
||||
Bits: 16,
|
||||
Num: 6,
|
||||
Output: "fe80:0:0:6::/64",
|
||||
},
|
||||
Case{
|
||||
Base: "fe80::/49",
|
||||
Bits: 16,
|
||||
Num: 7,
|
||||
Output: "fe80:0:0:3:8000::/65",
|
||||
},
|
||||
Case{
|
||||
Base: "192.168.2.0/31",
|
||||
Bits: 2,
|
||||
Num: 0,
|
||||
Error: true, // not enough bits to expand into
|
||||
},
|
||||
Case{
|
||||
Base: "fe80::/126",
|
||||
Bits: 4,
|
||||
Num: 0,
|
||||
Error: true, // not enough bits to expand into
|
||||
},
|
||||
Case{
|
||||
Base: "192.168.2.0/24",
|
||||
Bits: 4,
|
||||
Num: 16,
|
||||
Error: true, // can't fit 16 into 4 bits
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
_, base, _ := net.ParseCIDR(testCase.Base)
|
||||
gotNet, err := Subnet(base, testCase.Bits, testCase.Num)
|
||||
desc := fmt.Sprintf("Subnet(%#v,%#v,%#v)", testCase.Base, testCase.Bits, testCase.Num)
|
||||
if err != nil {
|
||||
if !testCase.Error {
|
||||
t.Errorf("%s failed: %s", desc, err.Error())
|
||||
}
|
||||
} else {
|
||||
got := gotNet.String()
|
||||
if testCase.Error {
|
||||
t.Errorf("%s = %s; want error", desc, got)
|
||||
} else {
|
||||
if got != testCase.Output {
|
||||
t.Errorf("%s = %s; want %s", desc, got, testCase.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHost(t *testing.T) {
|
||||
type Case struct {
|
||||
Range string
|
||||
Num int
|
||||
Output string
|
||||
Error bool
|
||||
}
|
||||
|
||||
cases := []Case{
|
||||
Case{
|
||||
Range: "192.168.2.0/20",
|
||||
Num: 6,
|
||||
Output: "192.168.0.6",
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.0.0/20",
|
||||
Num: 257,
|
||||
Output: "192.168.1.1",
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.1.0/24",
|
||||
Num: 256,
|
||||
Error: true, // only 0-255 will fit in 8 bits
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
_, network, _ := net.ParseCIDR(testCase.Range)
|
||||
gotIP, err := Host(network, testCase.Num)
|
||||
desc := fmt.Sprintf("Host(%#v,%#v)", testCase.Range, testCase.Num)
|
||||
if err != nil {
|
||||
if !testCase.Error {
|
||||
t.Errorf("%s failed: %s", desc, err.Error())
|
||||
}
|
||||
} else {
|
||||
got := gotIP.String()
|
||||
if testCase.Error {
|
||||
t.Errorf("%s = %s; want error", desc, got)
|
||||
} else {
|
||||
if got != testCase.Output {
|
||||
t.Errorf("%s = %s; want %s", desc, got, testCase.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressRange(t *testing.T) {
|
||||
type Case struct {
|
||||
Range string
|
||||
First string
|
||||
Last string
|
||||
}
|
||||
|
||||
cases := []Case{
|
||||
Case{
|
||||
Range: "192.168.0.0/16",
|
||||
First: "192.168.0.0",
|
||||
Last: "192.168.255.255",
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.0.0/17",
|
||||
First: "192.168.0.0",
|
||||
Last: "192.168.127.255",
|
||||
},
|
||||
Case{
|
||||
Range: "fe80::/64",
|
||||
First: "fe80::",
|
||||
Last: "fe80::ffff:ffff:ffff:ffff",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
_, network, _ := net.ParseCIDR(testCase.Range)
|
||||
firstIP, lastIP := AddressRange(network)
|
||||
desc := fmt.Sprintf("AddressRange(%#v)", testCase.Range)
|
||||
gotFirstIP := firstIP.String()
|
||||
gotLastIP := lastIP.String()
|
||||
if gotFirstIP != testCase.First {
|
||||
t.Errorf("%s first is %s; want %s", desc, gotFirstIP, testCase.First)
|
||||
}
|
||||
if gotLastIP != testCase.Last {
|
||||
t.Errorf("%s last is %s; want %s", desc, gotLastIP, testCase.Last)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAddressCount(t *testing.T) {
|
||||
type Case struct {
|
||||
Range string
|
||||
Count uint64
|
||||
}
|
||||
|
||||
cases := []Case{
|
||||
Case{
|
||||
Range: "192.168.0.0/16",
|
||||
Count: 65536,
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.0.0/17",
|
||||
Count: 32768,
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.0.0/32",
|
||||
Count: 1,
|
||||
},
|
||||
Case{
|
||||
Range: "192.168.0.0/31",
|
||||
Count: 2,
|
||||
},
|
||||
Case{
|
||||
Range: "0.0.0.0/0",
|
||||
Count: 4294967296,
|
||||
},
|
||||
Case{
|
||||
Range: "0.0.0.0/1",
|
||||
Count: 2147483648,
|
||||
},
|
||||
Case{
|
||||
Range: "::/65",
|
||||
Count: 9223372036854775808,
|
||||
},
|
||||
Case{
|
||||
Range: "::/128",
|
||||
Count: 1,
|
||||
},
|
||||
Case{
|
||||
Range: "::/127",
|
||||
Count: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
_, network, _ := net.ParseCIDR(testCase.Range)
|
||||
gotCount := AddressCount(network)
|
||||
desc := fmt.Sprintf("AddressCount(%#v)", testCase.Range)
|
||||
if gotCount != testCase.Count {
|
||||
t.Errorf("%s = %d; want %d", desc, gotCount, testCase.Count)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)

Copyright (c) 2015 Martin Atkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
|
@ -1,113 +0,0 @@
|
|||
package rundeck
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUnmarshalJobDetail(t *testing.T) {
|
||||
testUnmarshalXML(t, []unmarshalTest{
|
||||
unmarshalTest{
|
||||
"with-config",
|
||||
`<job><uuid>baz</uuid><dispatch><rankOrder>ascending</rankOrder></dispatch></job>`,
|
||||
&JobDetail{},
|
||||
func (rv interface {}) error {
|
||||
v := rv.(*JobDetail)
|
||||
if v.ID != "baz" {
|
||||
return fmt.Errorf("got ID %s, but expecting baz", v.ID)
|
||||
}
|
||||
if v.Dispatch.RankOrder != "ascending" {
|
||||
return fmt.Errorf("Dispatch.RankOrder = \"%v\", but expecting \"ascending\"", v.Dispatch.RankOrder)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
unmarshalTest{
|
||||
"with-empty-config",
|
||||
`<JobPlugin type="foo-plugin"><configuration/></JobPlugin>`,
|
||||
&JobPlugin{},
|
||||
func (rv interface {}) error {
|
||||
v := rv.(*JobPlugin)
|
||||
if v.Type != "foo-plugin" {
|
||||
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
|
||||
}
|
||||
if len(v.Config) != 0 {
|
||||
return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMarshalJobPlugin(t *testing.T) {
|
||||
testMarshalXML(t, []marshalTest{
|
||||
marshalTest{
|
||||
"with-config",
|
||||
JobPlugin{
|
||||
Type: "foo-plugin",
|
||||
Config: map[string]string{
|
||||
"woo": "foo",
|
||||
"bar": "baz",
|
||||
},
|
||||
},
|
||||
`<JobPlugin type="foo-plugin"><configuration><entry key="bar" value="baz"></entry><entry key="woo" value="foo"></entry></configuration></JobPlugin>`,
|
||||
},
|
||||
marshalTest{
|
||||
"with-empty-config",
|
||||
JobPlugin{
|
||||
Type: "foo-plugin",
|
||||
Config: map[string]string{},
|
||||
},
|
||||
`<JobPlugin type="foo-plugin"></JobPlugin>`,
|
||||
},
|
||||
marshalTest{
|
||||
"with-zero-value-config",
|
||||
JobPlugin{
|
||||
Type: "foo-plugin",
|
||||
},
|
||||
`<JobPlugin type="foo-plugin"></JobPlugin>`,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestUnmarshalJobPlugin(t *testing.T) {
|
||||
testUnmarshalXML(t, []unmarshalTest{
|
||||
unmarshalTest{
|
||||
"with-config",
|
||||
`<JobPlugin type="foo-plugin"><configuration><entry key="woo" value="foo"/><entry key="bar" value="baz"/></configuration></JobPlugin>`,
|
||||
&JobPlugin{},
|
||||
func (rv interface {}) error {
|
||||
v := rv.(*JobPlugin)
|
||||
if v.Type != "foo-plugin" {
|
||||
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
|
||||
}
|
||||
if len(v.Config) != 2 {
|
||||
return fmt.Errorf("got %v Config values, but expecting 2", len(v.Config))
|
||||
}
|
||||
if v.Config["woo"] != "foo" {
|
||||
return fmt.Errorf("Config[\"woo\"] = \"%s\", but expecting \"foo\"", v.Config["woo"])
|
||||
}
|
||||
if v.Config["bar"] != "baz" {
|
||||
return fmt.Errorf("Config[\"bar\"] = \"%s\", but expecting \"baz\"", v.Config["bar"])
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
unmarshalTest{
|
||||
"with-empty-config",
|
||||
`<JobPlugin type="foo-plugin"><configuration/></JobPlugin>`,
|
||||
&JobPlugin{},
|
||||
func (rv interface {}) error {
|
||||
v := rv.(*JobPlugin)
|
||||
if v.Type != "foo-plugin" {
|
||||
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
|
||||
}
|
||||
if len(v.Config) != 0 {
|
||||
return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
|
@ -1,198 +0,0 @@
|
|||
package circbuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBuffer_Impl(t *testing.T) {
|
||||
var _ io.Writer = &Buffer{}
|
||||
}
|
||||
|
||||
func TestBuffer_ShortWrite(t *testing.T) {
|
||||
buf, err := NewBuffer(1024)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
inp := []byte("hello world")
|
||||
|
||||
n, err := buf.Write(inp)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(inp) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), inp) {
|
||||
t.Fatalf("bad: %v", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_FullWrite(t *testing.T) {
|
||||
inp := []byte("hello world")
|
||||
|
||||
buf, err := NewBuffer(int64(len(inp)))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
n, err := buf.Write(inp)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(inp) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), inp) {
|
||||
t.Fatalf("bad: %v", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_LongWrite(t *testing.T) {
|
||||
inp := []byte("hello world")
|
||||
|
||||
buf, err := NewBuffer(6)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
n, err := buf.Write(inp)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(inp) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
|
||||
expect := []byte(" world")
|
||||
if !bytes.Equal(buf.Bytes(), expect) {
|
||||
t.Fatalf("bad: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_HugeWrite(t *testing.T) {
|
||||
inp := []byte("hello world")
|
||||
|
||||
buf, err := NewBuffer(3)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
n, err := buf.Write(inp)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(inp) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
|
||||
expect := []byte("rld")
|
||||
if !bytes.Equal(buf.Bytes(), expect) {
|
||||
t.Fatalf("bad: %s", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_ManySmall(t *testing.T) {
|
||||
inp := []byte("hello world")
|
||||
|
||||
buf, err := NewBuffer(3)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for _, b := range inp {
|
||||
n, err := buf.Write([]byte{b})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
}
|
||||
|
||||
expect := []byte("rld")
|
||||
if !bytes.Equal(buf.Bytes(), expect) {
|
||||
t.Fatalf("bad: %v", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_MultiPart(t *testing.T) {
|
||||
inputs := [][]byte{
|
||||
[]byte("hello world\n"),
|
||||
[]byte("this is a test\n"),
|
||||
[]byte("my cool input\n"),
|
||||
}
|
||||
total := 0
|
||||
|
||||
buf, err := NewBuffer(16)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for _, b := range inputs {
|
||||
total += len(b)
|
||||
n, err := buf.Write(b)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(b) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
}
|
||||
|
||||
if int64(total) != buf.TotalWritten() {
|
||||
t.Fatalf("bad total")
|
||||
}
|
||||
|
||||
expect := []byte("t\nmy cool input\n")
|
||||
if !bytes.Equal(buf.Bytes(), expect) {
|
||||
t.Fatalf("bad: %v", buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_Reset(t *testing.T) {
|
||||
// Write a bunch of data
|
||||
inputs := [][]byte{
|
||||
[]byte("hello world\n"),
|
||||
[]byte("this is a test\n"),
|
||||
[]byte("my cool input\n"),
|
||||
}
|
||||
|
||||
buf, err := NewBuffer(4)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for _, b := range inputs {
|
||||
n, err := buf.Write(b)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if n != len(b) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset it
|
||||
buf.Reset()
|
||||
|
||||
// Write more data
|
||||
input := []byte("hello")
|
||||
n, err := buf.Write(input)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if n != len(input) {
|
||||
t.Fatalf("bad: %v", n)
|
||||
}
|
||||
|
||||
// Test the output
|
||||
expect := []byte("ello")
|
||||
if !bytes.Equal(buf.Bytes(), expect) {
|
||||
t.Fatalf("bad: %v", string(buf.Bytes()))
|
||||
}
|
||||
}
|
|
@ -1,319 +0,0 @@
|
|||
package radix
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRadix(t *testing.T) {
|
||||
var min, max string
|
||||
inp := make(map[string]interface{})
|
||||
for i := 0; i < 1000; i++ {
|
||||
gen := generateUUID()
|
||||
inp[gen] = i
|
||||
if gen < min || i == 0 {
|
||||
min = gen
|
||||
}
|
||||
if gen > max || i == 0 {
|
||||
max = gen
|
||||
}
|
||||
}
|
||||
|
||||
r := NewFromMap(inp)
|
||||
if r.Len() != len(inp) {
|
||||
t.Fatalf("bad length: %v %v", r.Len(), len(inp))
|
||||
}
|
||||
|
||||
r.Walk(func(k string, v interface{}) bool {
|
||||
println(k)
|
||||
return false
|
||||
})
|
||||
|
||||
for k, v := range inp {
|
||||
out, ok := r.Get(k)
|
||||
if !ok {
|
||||
t.Fatalf("missing key: %v", k)
|
||||
}
|
||||
if out != v {
|
||||
t.Fatalf("value mis-match: %v %v", out, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Check min and max
|
||||
outMin, _, _ := r.Minimum()
|
||||
if outMin != min {
|
||||
t.Fatalf("bad minimum: %v %v", outMin, min)
|
||||
}
|
||||
outMax, _, _ := r.Maximum()
|
||||
if outMax != max {
|
||||
t.Fatalf("bad maximum: %v %v", outMax, max)
|
||||
}
|
||||
|
||||
for k, v := range inp {
|
||||
out, ok := r.Delete(k)
|
||||
if !ok {
|
||||
t.Fatalf("missing key: %v", k)
|
||||
}
|
||||
if out != v {
|
||||
t.Fatalf("value mis-match: %v %v", out, v)
|
||||
}
|
||||
}
|
||||
if r.Len() != 0 {
|
||||
t.Fatalf("bad length: %v", r.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoot(t *testing.T) {
|
||||
r := New()
|
||||
_, ok := r.Delete("")
|
||||
if ok {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
_, ok = r.Insert("", true)
|
||||
if ok {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
val, ok := r.Get("")
|
||||
if !ok || val != true {
|
||||
t.Fatalf("bad: %v", val)
|
||||
}
|
||||
val, ok = r.Delete("")
|
||||
if !ok || val != true {
|
||||
t.Fatalf("bad: %v", val)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
|
||||
r := New()
|
||||
|
||||
s := []string{"", "A", "AB"}
|
||||
|
||||
for _, ss := range s {
|
||||
r.Insert(ss, true)
|
||||
}
|
||||
|
||||
for _, ss := range s {
|
||||
_, ok := r.Delete(ss)
|
||||
if !ok {
|
||||
t.Fatalf("bad %q", ss)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLongestPrefix(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"",
|
||||
"foo",
|
||||
"foobar",
|
||||
"foobarbaz",
|
||||
"foobarbazzip",
|
||||
"foozip",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out string
|
||||
}
|
||||
cases := []exp{
|
||||
{"a", ""},
|
||||
{"abc", ""},
|
||||
{"fo", ""},
|
||||
{"foo", "foo"},
|
||||
{"foob", "foo"},
|
||||
{"foobar", "foobar"},
|
||||
{"foobarba", "foobar"},
|
||||
{"foobarbaz", "foobarbaz"},
|
||||
{"foobarbazzi", "foobarbaz"},
|
||||
{"foobarbazzip", "foobarbazzip"},
|
||||
{"foozi", "foo"},
|
||||
{"foozip", "foozip"},
|
||||
{"foozipzap", "foozip"},
|
||||
}
|
||||
for _, test := range cases {
|
||||
m, _, ok := r.LongestPrefix(test.inp)
|
||||
if !ok {
|
||||
t.Fatalf("no match: %v", test)
|
||||
}
|
||||
if m != test.out {
|
||||
t.Fatalf("mis-match: %v %v", m, test)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkPrefix(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"foobar",
|
||||
"foo/bar/baz",
|
||||
"foo/baz/bar",
|
||||
"foo/zip/zap",
|
||||
"zipzap",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out []string
|
||||
}
|
||||
cases := []exp{
|
||||
{
|
||||
"f",
|
||||
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foo",
|
||||
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foob",
|
||||
[]string{"foobar"},
|
||||
},
|
||||
{
|
||||
"foo/",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
|
||||
},
|
||||
{
|
||||
"foo/b",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar"},
|
||||
},
|
||||
{
|
||||
"foo/ba",
|
||||
[]string{"foo/bar/baz", "foo/baz/bar"},
|
||||
},
|
||||
{
|
||||
"foo/bar",
|
||||
[]string{"foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/baz",
|
||||
[]string{"foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/bazoo",
|
||||
[]string{},
|
||||
},
|
||||
{
|
||||
"z",
|
||||
[]string{"zipzap"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
out := []string{}
|
||||
fn := func(s string, v interface{}) bool {
|
||||
out = append(out, s)
|
||||
return false
|
||||
}
|
||||
r.WalkPrefix(test.inp, fn)
|
||||
sort.Strings(out)
|
||||
sort.Strings(test.out)
|
||||
if !reflect.DeepEqual(out, test.out) {
|
||||
t.Fatalf("mis-match: %v %v", out, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkPath(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
keys := []string{
|
||||
"foo",
|
||||
"foo/bar",
|
||||
"foo/bar/baz",
|
||||
"foo/baz/bar",
|
||||
"foo/zip/zap",
|
||||
"zipzap",
|
||||
}
|
||||
for _, k := range keys {
|
||||
r.Insert(k, nil)
|
||||
}
|
||||
if r.Len() != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out []string
|
||||
}
|
||||
cases := []exp{
|
||||
{
|
||||
"f",
|
||||
[]string{},
|
||||
},
|
||||
{
|
||||
"foo",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/ba",
|
||||
[]string{"foo"},
|
||||
},
|
||||
{
|
||||
"foo/bar",
|
||||
[]string{"foo", "foo/bar"},
|
||||
},
|
||||
{
|
||||
"foo/bar/baz",
|
||||
[]string{"foo", "foo/bar", "foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"foo/bar/bazoo",
|
||||
[]string{"foo", "foo/bar", "foo/bar/baz"},
|
||||
},
|
||||
{
|
||||
"z",
|
||||
[]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
out := []string{}
|
||||
fn := func(s string, v interface{}) bool {
|
||||
out = append(out, s)
|
||||
return false
|
||||
}
|
||||
r.WalkPath(test.inp, fn)
|
||||
sort.Strings(out)
|
||||
sort.Strings(test.out)
|
||||
if !reflect.DeepEqual(out, test.out) {
|
||||
t.Fatalf("mis-match: %v %v", out, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generateUUID is used to generate a random UUID
func generateUUID() string {
	buf := make([]byte, 16)
	if _, err := crand.Read(buf); err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}

	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		buf[0:4],
		buf[4:6],
		buf[6:8],
		buf[8:10],
		buf[10:16])
}
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,3 @@
|
|||
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
|
|
@ -1,233 +0,0 @@
|
|||
package awsutil_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func ExampleCopy() {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
|
||||
|
||||
// Do the copy
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Print the result
|
||||
fmt.Println(awsutil.Prettify(f2))
|
||||
|
||||
// Output:
|
||||
// {
|
||||
// A: 1,
|
||||
// B: ["hello","bye bye"]
|
||||
// }
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
int1 := 1
|
||||
int2 := 2
|
||||
f1 := &Foo{
|
||||
A: 1,
|
||||
B: []*string{&str1, &str2},
|
||||
C: map[string]*int{
|
||||
"A": &int1,
|
||||
"B": &int2,
|
||||
},
|
||||
}
|
||||
|
||||
// Do the copy
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Values are equal
|
||||
assert.Equal(t, f2.A, f1.A)
|
||||
assert.Equal(t, f2.B, f1.B)
|
||||
assert.Equal(t, f2.C, f1.C)
|
||||
|
||||
// But pointers are not!
|
||||
str3 := "nothello"
|
||||
int3 := 57
|
||||
f2.A = 100
|
||||
f2.B[0] = &str3
|
||||
f2.C["B"] = &int3
|
||||
assert.NotEqual(t, f2.A, f1.A)
|
||||
assert.NotEqual(t, f2.B, f1.B)
|
||||
assert.NotEqual(t, f2.C, f1.C)
|
||||
}
|
||||
|
||||
func TestCopyNestedWithUnexported(t *testing.T) {
|
||||
type Bar struct {
|
||||
a int
|
||||
B int
|
||||
}
|
||||
type Foo struct {
|
||||
A string
|
||||
B Bar
|
||||
}
|
||||
|
||||
f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}
|
||||
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Values match
|
||||
assert.Equal(t, f2.A, f1.A)
|
||||
assert.NotEqual(t, f2.B, f1.B)
|
||||
assert.NotEqual(t, f2.B.a, f1.B.a)
|
||||
assert.Equal(t, f2.B.B, f1.B.B)
|
||||
}
|
||||
|
||||
func TestCopyIgnoreNilMembers(t *testing.T) {
|
||||
type Foo struct {
|
||||
A *string
|
||||
B []string
|
||||
C map[string]string
|
||||
}
|
||||
|
||||
f := &Foo{}
|
||||
assert.Nil(t, f.A)
|
||||
assert.Nil(t, f.B)
|
||||
assert.Nil(t, f.C)
|
||||
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f)
|
||||
assert.Nil(t, f2.A)
|
||||
assert.Nil(t, f2.B)
|
||||
assert.Nil(t, f2.C)
|
||||
|
||||
fcopy := awsutil.CopyOf(f)
|
||||
f3 := fcopy.(*Foo)
|
||||
assert.Nil(t, f3.A)
|
||||
assert.Nil(t, f3.B)
|
||||
assert.Nil(t, f3.C)
|
||||
}
|
||||
|
||||
func TestCopyPrimitive(t *testing.T) {
|
||||
str := "hello"
|
||||
var s string
|
||||
awsutil.Copy(&s, &str)
|
||||
assert.Equal(t, "hello", s)
|
||||
}
|
||||
|
||||
func TestCopyNil(t *testing.T) {
|
||||
var s string
|
||||
awsutil.Copy(&s, nil)
|
||||
assert.Equal(t, "", s)
|
||||
}
|
||||
|
||||
func TestCopyReader(t *testing.T) {
|
||||
var buf io.Reader = bytes.NewReader([]byte("hello world"))
|
||||
var r io.Reader
|
||||
awsutil.Copy(&r, buf)
|
||||
b, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("hello world"), b)
|
||||
|
||||
// empty bytes because this is not a deep copy
|
||||
b, err = ioutil.ReadAll(buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte(""), b)
|
||||
}
|
||||
|
||||
func TestCopyDifferentStructs(t *testing.T) {
|
||||
type SrcFoo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
SrcUnique string
|
||||
SameNameDiffType int
|
||||
unexportedPtr *int
|
||||
ExportedPtr *int
|
||||
}
|
||||
type DstFoo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
DstUnique int
|
||||
SameNameDiffType string
|
||||
unexportedPtr *int
|
||||
ExportedPtr *int
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
int1 := 1
|
||||
int2 := 2
|
||||
f1 := &SrcFoo{
|
||||
A: 1,
|
||||
B: []*string{&str1, &str2},
|
||||
C: map[string]*int{
|
||||
"A": &int1,
|
||||
"B": &int2,
|
||||
},
|
||||
SrcUnique: "unique",
|
||||
SameNameDiffType: 1,
|
||||
unexportedPtr: &int1,
|
||||
ExportedPtr: &int2,
|
||||
}
|
||||
|
||||
// Do the copy
|
||||
var f2 DstFoo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Values are equal
|
||||
assert.Equal(t, f2.A, f1.A)
|
||||
assert.Equal(t, f2.B, f1.B)
|
||||
assert.Equal(t, f2.C, f1.C)
|
||||
assert.Equal(t, "unique", f1.SrcUnique)
|
||||
assert.Equal(t, 1, f1.SameNameDiffType)
|
||||
assert.Equal(t, 0, f2.DstUnique)
|
||||
assert.Equal(t, "", f2.SameNameDiffType)
|
||||
assert.Equal(t, int1, *f1.unexportedPtr)
|
||||
assert.Nil(t, f2.unexportedPtr)
|
||||
assert.Equal(t, int2, *f1.ExportedPtr)
|
||||
assert.Equal(t, int2, *f2.ExportedPtr)
|
||||
}
|
||||
|
||||
func ExampleCopyOf() {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
|
||||
|
||||
// Do the copy
|
||||
v := awsutil.CopyOf(f1)
|
||||
var f2 *Foo = v.(*Foo)
|
||||
|
||||
// Print the result
|
||||
fmt.Println(awsutil.Prettify(f2))
|
||||
|
||||
// Output:
|
||||
// {
|
||||
// A: 1,
|
||||
// B: ["hello","bye bye"]
|
||||
// }
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
package awsutil_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/stretchr/testify/assert"
)

func TestDeepEqual(t *testing.T) {
	cases := []struct {
		a, b  interface{}
		equal bool
	}{
		{"a", "a", true},
		{"a", "b", false},
		{"a", aws.String(""), false},
		{"a", nil, false},
		{"a", aws.String("a"), true},
		{(*bool)(nil), (*bool)(nil), true},
		{(*bool)(nil), (*string)(nil), false},
		{nil, nil, true},
	}

	for i, c := range cases {
		assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal)
	}
}
|
|
@ -1,142 +0,0 @@
|
|||
package awsutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type Struct struct {
|
||||
A []Struct
|
||||
z []Struct
|
||||
B *Struct
|
||||
D *Struct
|
||||
C string
|
||||
E map[string]string
|
||||
}
|
||||
|
||||
var data = Struct{
|
||||
A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
|
||||
z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
|
||||
B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
|
||||
C: "initial",
|
||||
}
|
||||
var data2 = Struct{A: []Struct{
|
||||
{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
|
||||
{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
|
||||
}}
|
||||
|
||||
func TestValueAtPathSuccess(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
expect []interface{}
|
||||
data interface{}
|
||||
path string
|
||||
}{
|
||||
{[]interface{}{"initial"}, data, "C"},
|
||||
{[]interface{}{"value1"}, data, "A[0].C"},
|
||||
{[]interface{}{"value2"}, data, "A[1].C"},
|
||||
{[]interface{}{"value3"}, data, "A[2].C"},
|
||||
{[]interface{}{"value3"}, data, "a[2].c"},
|
||||
{[]interface{}{"value3"}, data, "A[-1].C"},
|
||||
{[]interface{}{"value1", "value2", "value3"}, data, "A[].C"},
|
||||
{[]interface{}{"terminal"}, data, "B . B . C"},
|
||||
{[]interface{}{"initial"}, data, "A.D.X || C"},
|
||||
{[]interface{}{"initial"}, data, "A[0].B || C"},
|
||||
{[]interface{}{
|
||||
Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
|
||||
Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
|
||||
}, data2, "A"},
|
||||
}
|
||||
for i, c := range testCases {
|
||||
v, err := awsutil.ValuesAtPath(c.data, c.path)
|
||||
assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
|
||||
assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueAtPathFailure(t *testing.T) {
|
||||
var testCases = []struct {
|
||||
expect []interface{}
|
||||
errContains string
|
||||
data interface{}
|
||||
path string
|
||||
}{
|
||||
{nil, "", data, "C.x"},
|
||||
{nil, "SyntaxError: Invalid token: tDot", data, ".x"},
|
||||
{nil, "", data, "X.Y.Z"},
|
||||
{nil, "", data, "A[100].C"},
|
||||
{nil, "", data, "A[3].C"},
|
||||
{nil, "", data, "B.B.C.Z"},
|
||||
{nil, "", data, "z[-1].C"},
|
||||
{nil, "", nil, "A.B.C"},
|
||||
{[]interface{}{}, "", Struct{}, "A"},
|
||||
{nil, "", data, "A[0].B.C"},
|
||||
{nil, "", data, "D"},
|
||||
}
|
||||
|
||||
for i, c := range testCases {
|
||||
v, err := awsutil.ValuesAtPath(c.data, c.path)
|
||||
if c.errContains != "" {
|
||||
assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path)
|
||||
continue
|
||||
} else {
|
||||
assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
|
||||
}
|
||||
assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetValueAtPathSuccess(t *testing.T) {
|
||||
var s Struct
|
||||
awsutil.SetValueAtPath(&s, "C", "test1")
|
||||
awsutil.SetValueAtPath(&s, "B.B.C", "test2")
|
||||
awsutil.SetValueAtPath(&s, "B.D.C", "test3")
|
||||
assert.Equal(t, "test1", s.C)
|
||||
assert.Equal(t, "test2", s.B.B.C)
|
||||
assert.Equal(t, "test3", s.B.D.C)
|
||||
|
||||
awsutil.SetValueAtPath(&s, "B.*.C", "test0")
|
||||
assert.Equal(t, "test0", s.B.B.C)
|
||||
assert.Equal(t, "test0", s.B.D.C)
|
||||
|
||||
var s2 Struct
|
||||
awsutil.SetValueAtPath(&s2, "b.b.c", "test0")
|
||||
assert.Equal(t, "test0", s2.B.B.C)
|
||||
awsutil.SetValueAtPath(&s2, "A", []Struct{{}})
|
||||
assert.Equal(t, []Struct{{}}, s2.A)
|
||||
|
||||
str := "foo"
|
||||
|
||||
s3 := Struct{}
|
||||
awsutil.SetValueAtPath(&s3, "b.b.c", str)
|
||||
assert.Equal(t, "foo", s3.B.B.C)
|
||||
|
||||
s3 = Struct{B: &Struct{B: &Struct{C: str}}}
|
||||
awsutil.SetValueAtPath(&s3, "b.b.c", nil)
|
||||
assert.Equal(t, "", s3.B.B.C)
|
||||
|
||||
s3 = Struct{}
|
||||
awsutil.SetValueAtPath(&s3, "b.b.c", nil)
|
||||
assert.Equal(t, "", s3.B.B.C)
|
||||
|
||||
s3 = Struct{}
|
||||
awsutil.SetValueAtPath(&s3, "b.b.c", &str)
|
||||
assert.Equal(t, "foo", s3.B.B.C)
|
||||
|
||||
var s4 struct{ Name *string }
|
||||
awsutil.SetValueAtPath(&s4, "Name", str)
|
||||
assert.Equal(t, str, *s4.Name)
|
||||
|
||||
s4 = struct{ Name *string }{}
|
||||
awsutil.SetValueAtPath(&s4, "Name", nil)
|
||||
assert.Equal(t, (*string)(nil), s4.Name)
|
||||
|
||||
s4 = struct{ Name *string }{Name: &str}
|
||||
awsutil.SetValueAtPath(&s4, "Name", nil)
|
||||
assert.Equal(t, (*string)(nil), s4.Name)
|
||||
|
||||
s4 = struct{ Name *string }{}
|
||||
awsutil.SetValueAtPath(&s4, "Name", &str)
|
||||
assert.Equal(t, str, *s4.Name)
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
|
||||
|
||||
var copyTestConfig = Config{
|
||||
Credentials: testCredentials,
|
||||
Endpoint: String("CopyTestEndpoint"),
|
||||
Region: String("COPY_TEST_AWS_REGION"),
|
||||
DisableSSL: Bool(true),
|
||||
HTTPClient: http.DefaultClient,
|
||||
LogLevel: LogLevel(LogDebug),
|
||||
Logger: NewDefaultLogger(),
|
||||
MaxRetries: Int(3),
|
||||
DisableParamValidation: Bool(true),
|
||||
DisableComputeChecksums: Bool(true),
|
||||
S3ForcePathStyle: Bool(true),
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
want := copyTestConfig
|
||||
got := copyTestConfig.Copy()
|
||||
if !reflect.DeepEqual(*got, want) {
|
||||
t.Errorf("Copy() = %+v", got)
|
||||
t.Errorf(" want %+v", want)
|
||||
}
|
||||
|
||||
got.Region = String("other")
|
||||
if got.Region == want.Region {
|
||||
t.Errorf("Expect setting copy values not not reflect in source")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyReturnsNewInstance(t *testing.T) {
|
||||
want := copyTestConfig
|
||||
got := copyTestConfig.Copy()
|
||||
if got == &want {
|
||||
t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
|
||||
}
|
||||
}
|
||||
|
||||
var mergeTestZeroValueConfig = Config{}
|
||||
|
||||
var mergeTestConfig = Config{
|
||||
Credentials: testCredentials,
|
||||
Endpoint: String("MergeTestEndpoint"),
|
||||
Region: String("MERGE_TEST_AWS_REGION"),
|
||||
DisableSSL: Bool(true),
|
||||
HTTPClient: http.DefaultClient,
|
||||
LogLevel: LogLevel(LogDebug),
|
||||
Logger: NewDefaultLogger(),
|
||||
MaxRetries: Int(10),
|
||||
DisableParamValidation: Bool(true),
|
||||
DisableComputeChecksums: Bool(true),
|
||||
S3ForcePathStyle: Bool(true),
|
||||
}
|
||||
|
||||
var mergeTests = []struct {
|
||||
cfg *Config
|
||||
in *Config
|
||||
want *Config
|
||||
}{
|
||||
{&Config{}, nil, &Config{}},
|
||||
{&Config{}, &mergeTestZeroValueConfig, &Config{}},
|
||||
{&Config{}, &mergeTestConfig, &mergeTestConfig},
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for i, tt := range mergeTests {
|
||||
got := tt.cfg.Copy()
|
||||
got.MergeIn(tt.in)
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("Config %d %+v", i, tt.cfg)
|
||||
t.Errorf(" Merge(%+v)", tt.in)
|
||||
t.Errorf(" got %+v", got)
|
||||
t.Errorf(" want %+v", tt.want)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,437 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var testCasesStringSlice = [][]string{
|
||||
{"a", "b", "c", "d", "e"},
|
||||
{"a", "b", "", "", "e"},
|
||||
}
|
||||
|
||||
func TestStringSlice(t *testing.T) {
|
||||
for idx, in := range testCasesStringSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := StringSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := StringValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesStringValueSlice = [][]*string{
|
||||
{String("a"), String("b"), nil, String("c")},
|
||||
}
|
||||
|
||||
func TestStringValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesStringValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := StringValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := StringSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesStringMap = []map[string]string{
|
||||
{"a": "1", "b": "2", "c": "3"},
|
||||
}
|
||||
|
||||
func TestStringMap(t *testing.T) {
|
||||
for idx, in := range testCasesStringMap {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := StringMap(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := StringValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesBoolSlice = [][]bool{
|
||||
{true, true, false, false},
|
||||
}
|
||||
|
||||
func TestBoolSlice(t *testing.T) {
|
||||
for idx, in := range testCasesBoolSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := BoolSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := BoolValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesBoolValueSlice = [][]*bool{}
|
||||
|
||||
func TestBoolValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesBoolValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := BoolValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
                assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
            }
        }

        out2 := BoolSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        for i := range out2 {
            if in[i] == nil {
                assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
            }
        }
    }
}

var testCasesBoolMap = []map[string]bool{
    {"a": true, "b": false, "c": true},
}

func TestBoolMap(t *testing.T) {
    for idx, in := range testCasesBoolMap {
        if in == nil {
            continue
        }
        out := BoolMap(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := BoolValueMap(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesIntSlice = [][]int{
    {1, 2, 3, 4},
}

func TestIntSlice(t *testing.T) {
    for idx, in := range testCasesIntSlice {
        if in == nil {
            continue
        }
        out := IntSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := IntValueSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesIntValueSlice = [][]*int{}

func TestIntValueSlice(t *testing.T) {
    for idx, in := range testCasesIntValueSlice {
        if in == nil {
            continue
        }
        out := IntValueSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            if in[i] == nil {
                assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
            }
        }

        out2 := IntSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        for i := range out2 {
            if in[i] == nil {
                assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
            }
        }
    }
}

var testCasesIntMap = []map[string]int{
    {"a": 3, "b": 2, "c": 1},
}

func TestIntMap(t *testing.T) {
    for idx, in := range testCasesIntMap {
        if in == nil {
            continue
        }
        out := IntMap(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := IntValueMap(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesInt64Slice = [][]int64{
    {1, 2, 3, 4},
}

func TestInt64Slice(t *testing.T) {
    for idx, in := range testCasesInt64Slice {
        if in == nil {
            continue
        }
        out := Int64Slice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := Int64ValueSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesInt64ValueSlice = [][]*int64{}

func TestInt64ValueSlice(t *testing.T) {
    for idx, in := range testCasesInt64ValueSlice {
        if in == nil {
            continue
        }
        out := Int64ValueSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            if in[i] == nil {
                assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
            }
        }

        out2 := Int64Slice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        for i := range out2 {
            if in[i] == nil {
                assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
            }
        }
    }
}

var testCasesInt64Map = []map[string]int64{
    {"a": 3, "b": 2, "c": 1},
}

func TestInt64Map(t *testing.T) {
    for idx, in := range testCasesInt64Map {
        if in == nil {
            continue
        }
        out := Int64Map(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := Int64ValueMap(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesFloat64Slice = [][]float64{
    {1, 2, 3, 4},
}

func TestFloat64Slice(t *testing.T) {
    for idx, in := range testCasesFloat64Slice {
        if in == nil {
            continue
        }
        out := Float64Slice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := Float64ValueSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesFloat64ValueSlice = [][]*float64{}

func TestFloat64ValueSlice(t *testing.T) {
    for idx, in := range testCasesFloat64ValueSlice {
        if in == nil {
            continue
        }
        out := Float64ValueSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            if in[i] == nil {
                assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
            }
        }

        out2 := Float64Slice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        for i := range out2 {
            if in[i] == nil {
                assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
            }
        }
    }
}

var testCasesFloat64Map = []map[string]float64{
    {"a": 3, "b": 2, "c": 1},
}

func TestFloat64Map(t *testing.T) {
    for idx, in := range testCasesFloat64Map {
        if in == nil {
            continue
        }
        out := Float64Map(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := Float64ValueMap(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesTimeSlice = [][]time.Time{
    {time.Now(), time.Now().AddDate(100, 0, 0)},
}

func TestTimeSlice(t *testing.T) {
    for idx, in := range testCasesTimeSlice {
        if in == nil {
            continue
        }
        out := TimeSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := TimeValueSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

var testCasesTimeValueSlice = [][]*time.Time{}

func TestTimeValueSlice(t *testing.T) {
    for idx, in := range testCasesTimeValueSlice {
        if in == nil {
            continue
        }
        out := TimeValueSlice(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            if in[i] == nil {
                assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
            }
        }

        out2 := TimeSlice(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        for i := range out2 {
            if in[i] == nil {
                assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
            } else {
                assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
            }
        }
    }
}

var testCasesTimeMap = []map[string]time.Time{
    {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
}

func TestTimeMap(t *testing.T) {
    for idx, in := range testCasesTimeMap {
        if in == nil {
            continue
        }
        out := TimeMap(in)
        assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
        for i := range out {
            assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
        }

        out2 := TimeValueMap(out)
        assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
        assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
    }
}

@@ -1,113 +0,0 @@
package corehandlers_test

import (
    "fmt"
    "net/http"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/awstesting"
)

func TestValidateEndpointHandler(t *testing.T) {
    os.Clearenv()

    svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
    svc.Handlers.Clear()
    svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)

    req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
    err := req.Build()

    assert.NoError(t, err)
}

func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
    os.Clearenv()

    svc := awstesting.NewClient()
    svc.Handlers.Clear()
    svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)

    req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
    err := req.Build()

    assert.Error(t, err)
    assert.Equal(t, aws.ErrMissingRegion, err)
}

type mockCredsProvider struct {
    expired        bool
    retrieveCalled bool
}

func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
    m.retrieveCalled = true
    return credentials.Value{ProviderName: "mockCredsProvider"}, nil
}

func (m *mockCredsProvider) IsExpired() bool {
    return m.expired
}

func TestAfterRetryRefreshCreds(t *testing.T) {
    os.Clearenv()
    credProvider := &mockCredsProvider{}

    svc := awstesting.NewClient(&aws.Config{
        Credentials: credentials.NewCredentials(credProvider),
        MaxRetries:  aws.Int(1),
    })

    svc.Handlers.Clear()
    svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) {
        r.Error = awserr.New("UnknownError", "", nil)
        r.HTTPResponse = &http.Response{StatusCode: 400}
    })
    svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
        r.Error = awserr.New("ExpiredTokenException", "", nil)
    })
    svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)

    assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
    assert.False(t, credProvider.retrieveCalled)

    req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
    req.Send()

    assert.True(t, svc.Config.Credentials.IsExpired())
    assert.False(t, credProvider.retrieveCalled)

    _, err := svc.Config.Credentials.Get()
    assert.NoError(t, err)
    assert.True(t, credProvider.retrieveCalled)
}

type testSendHandlerTransport struct{}

func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
    return nil, fmt.Errorf("mock error")
}

func TestSendHandlerError(t *testing.T) {
    svc := awstesting.NewClient(&aws.Config{
        HTTPClient: &http.Client{
            Transport: &testSendHandlerTransport{},
        },
    })
    svc.Handlers.Clear()
    svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
    r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)

    r.Send()

    assert.Error(t, r.Error)
    assert.NotNil(t, r.HTTPResponse)
}

@@ -1,129 +0,0 @@
package corehandlers_test

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/client/metadata"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/stretchr/testify/require"
)

var testSvc = func() *client.Client {
    s := &client.Client{
        Config: aws.Config{},
        ClientInfo: metadata.ClientInfo{
            ServiceName: "mock-service",
            APIVersion:  "2015-01-01",
        },
    }
    return s
}()

type StructShape struct {
    RequiredList   []*ConditionalStructShape          `required:"true"`
    RequiredMap    map[string]*ConditionalStructShape `required:"true"`
    RequiredBool   *bool                              `required:"true"`
    OptionalStruct *ConditionalStructShape

    hiddenParameter *string
    _               struct{}
}

type ConditionalStructShape struct {
    Name *string `required:"true"`
    _    struct{}
}

func TestNoErrors(t *testing.T) {
    input := &StructShape{
        RequiredList: []*ConditionalStructShape{},
        RequiredMap: map[string]*ConditionalStructShape{
            "key1": {Name: aws.String("Name")},
            "key2": {Name: aws.String("Name")},
        },
        RequiredBool:   aws.Bool(true),
        OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
    }

    req := testSvc.NewRequest(&request.Operation{}, input, nil)
    corehandlers.ValidateParametersHandler.Fn(req)
    require.NoError(t, req.Error)
}

func TestMissingRequiredParameters(t *testing.T) {
    input := &StructShape{}
    req := testSvc.NewRequest(&request.Operation{}, input, nil)
    corehandlers.ValidateParametersHandler.Fn(req)

    require.Error(t, req.Error)
    assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
    assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message())
}

func TestNestedMissingRequiredParameters(t *testing.T) {
    input := &StructShape{
        RequiredList: []*ConditionalStructShape{{}},
        RequiredMap: map[string]*ConditionalStructShape{
            "key1": {Name: aws.String("Name")},
            "key2": {},
        },
        RequiredBool:   aws.Bool(true),
        OptionalStruct: &ConditionalStructShape{},
    }

    req := testSvc.NewRequest(&request.Operation{}, input, nil)
    corehandlers.ValidateParametersHandler.Fn(req)

    require.Error(t, req.Error)
    assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
    assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message())
}

type testInput struct {
    StringField string            `min:"5"`
    PtrStrField *string           `min:"2"`
    ListField   []string          `min:"3"`
    MapField    map[string]string `min:"4"`
}

var testsFieldMin = []struct {
    err awserr.Error
    in  testInput
}{
    {
        err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 5: StringField", nil),
        in:  testInput{StringField: "abcd"},
    },
    {
        err: awserr.New("InvalidParameter", "2 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil),
        in:  testInput{StringField: "abcd", ListField: []string{"a", "b"}},
    },
    {
        err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil),
        in:  testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}},
    },
    {
        err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil),
        in:  testInput{StringField: "abcde", PtrStrField: aws.String("v")},
    },
    {
        err: nil,
        in: testInput{StringField: "abcde", PtrStrField: aws.String("value"),
            ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}},
    },
}

func TestValidateFieldMinParameter(t *testing.T) {
    for i, c := range testsFieldMin {
        req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
        corehandlers.ValidateParametersHandler.Fn(req)

        require.Equal(t, c.err, req.Error, "%d case failed", i)
    }
}

@@ -1,154 +0,0 @@
package credentials

import (
    "testing"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/stretchr/testify/assert"
)

type secondStubProvider struct {
    creds   Value
    expired bool
    err     error
}

func (s *secondStubProvider) Retrieve() (Value, error) {
    s.expired = false
    s.creds.ProviderName = "secondStubProvider"
    return s.creds, s.err
}
func (s *secondStubProvider) IsExpired() bool {
    return s.expired
}

func TestChainProviderWithNames(t *testing.T) {
    p := &ChainProvider{
        Providers: []Provider{
            &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
            &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
            &secondStubProvider{
                creds: Value{
                    AccessKeyID:     "AKIF",
                    SecretAccessKey: "NOSECRET",
                    SessionToken:    "",
                },
            },
            &stubProvider{
                creds: Value{
                    AccessKeyID:     "AKID",
                    SecretAccessKey: "SECRET",
                    SessionToken:    "",
                },
            },
        },
    }

    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")
    assert.Equal(t, "secondStubProvider", creds.ProviderName, "Expect provider name to match")

    // Also check credentials
    assert.Equal(t, "AKIF", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "NOSECRET", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect session token to be empty")

}

func TestChainProviderGet(t *testing.T) {
    p := &ChainProvider{
        Providers: []Provider{
            &stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
            &stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
            &stubProvider{
                creds: Value{
                    AccessKeyID:     "AKID",
                    SecretAccessKey: "SECRET",
                    SessionToken:    "",
                },
            },
        },
    }

    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")
    assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
}

func TestChainProviderIsExpired(t *testing.T) {
    stubProvider := &stubProvider{expired: true}
    p := &ChainProvider{
        Providers: []Provider{
            stubProvider,
        },
    }

    assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
    _, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")
    assert.False(t, p.IsExpired(), "Expect not expired after retrieve")

    stubProvider.expired = true
    assert.True(t, p.IsExpired(), "Expect return of expired provider")

    _, err = p.Retrieve()
    assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
}

func TestChainProviderWithNoProvider(t *testing.T) {
    p := &ChainProvider{
        Providers: []Provider{},
    }

    assert.True(t, p.IsExpired(), "Expect expired with no providers")
    _, err := p.Retrieve()
    assert.Equal(t,
        ErrNoValidProvidersFoundInChain,
        err,
        "Expect no providers error returned")
}

func TestChainProviderWithNoValidProvider(t *testing.T) {
    errs := []error{
        awserr.New("FirstError", "first provider error", nil),
        awserr.New("SecondError", "second provider error", nil),
    }
    p := &ChainProvider{
        Providers: []Provider{
            &stubProvider{err: errs[0]},
            &stubProvider{err: errs[1]},
        },
    }

    assert.True(t, p.IsExpired(), "Expect expired with no providers")
    _, err := p.Retrieve()

    assert.Equal(t,
        ErrNoValidProvidersFoundInChain,
        err,
        "Expect no providers error returned")
}

func TestChainProviderWithNoValidProviderWithVerboseEnabled(t *testing.T) {
    errs := []error{
        awserr.New("FirstError", "first provider error", nil),
        awserr.New("SecondError", "second provider error", nil),
    }
    p := &ChainProvider{
        VerboseErrors: true,
        Providers: []Provider{
            &stubProvider{err: errs[0]},
            &stubProvider{err: errs[1]},
        },
    }

    assert.True(t, p.IsExpired(), "Expect expired with no providers")
    _, err := p.Retrieve()

    assert.Equal(t,
        awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs),
        err,
        "Expect no providers error returned")
}

@@ -1,73 +0,0 @@
package credentials

import (
    "testing"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/stretchr/testify/assert"
)

type stubProvider struct {
    creds   Value
    expired bool
    err     error
}

func (s *stubProvider) Retrieve() (Value, error) {
    s.expired = false
    s.creds.ProviderName = "stubProvider"
    return s.creds, s.err
}
func (s *stubProvider) IsExpired() bool {
    return s.expired
}

func TestCredentialsGet(t *testing.T) {
    c := NewCredentials(&stubProvider{
        creds: Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            SessionToken:    "",
        },
        expired: true,
    })

    creds, err := c.Get()
    assert.Nil(t, err, "Expected no error")
    assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
}

func TestCredentialsGetWithError(t *testing.T) {
    c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})

    _, err := c.Get()
    assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
}

func TestCredentialsExpire(t *testing.T) {
    stub := &stubProvider{}
    c := NewCredentials(stub)

    stub.expired = false
    assert.True(t, c.IsExpired(), "Expected to start out expired")
    c.Expire()
    assert.True(t, c.IsExpired(), "Expected to be expired")

    c.forceRefresh = false
    assert.False(t, c.IsExpired(), "Expected not to be expired")

    stub.expired = true
    assert.True(t, c.IsExpired(), "Expected to be expired")
}

func TestCredentialsGetWithProviderName(t *testing.T) {
    stub := &stubProvider{}

    c := NewCredentials(stub)

    creds, err := c.Get()
    assert.Nil(t, err, "Expected no error")
    assert.Equal(t, creds.ProviderName, "stubProvider", "Expected provider name to match")
}

159 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider_test.go generated vendored
@@ -1,159 +0,0 @@
package ec2rolecreds_test

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
)

const credsRespTmpl = `{
  "Code": "Success",
  "Type": "AWS-HMAC",
  "AccessKeyId" : "accessKey",
  "SecretAccessKey" : "secret",
  "Token" : "token",
  "Expiration" : "%s",
  "LastUpdated" : "2009-11-23T0:00:00Z"
}`

const credsFailRespTmpl = `{
  "Code": "ErrorCode",
  "Message": "ErrorMsg",
  "LastUpdated": "2009-11-23T0:00:00Z"
}`

func initTestServer(expireOn string, failAssume bool) *httptest.Server {
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
            fmt.Fprintln(w, "RoleName")
        } else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
            if failAssume {
                fmt.Fprintf(w, credsFailRespTmpl)
            } else {
                fmt.Fprintf(w, credsRespTmpl, expireOn)
            }
        } else {
            http.Error(w, "bad request", http.StatusBadRequest)
        }
    }))

    return server
}

func TestEC2RoleProvider(t *testing.T) {
    server := initTestServer("2014-12-16T01:51:37Z", false)
    defer server.Close()

    p := &ec2rolecreds.EC2RoleProvider{
        Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
    }

    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error, %v", err)

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}

func TestEC2RoleProviderFailAssume(t *testing.T) {
    server := initTestServer("2014-12-16T01:51:37Z", true)
    defer server.Close()

    p := &ec2rolecreds.EC2RoleProvider{
        Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
    }

    creds, err := p.Retrieve()
    assert.Error(t, err, "Expect error")

    e := err.(awserr.Error)
    assert.Equal(t, "ErrorCode", e.Code())
    assert.Equal(t, "ErrorMsg", e.Message())
    assert.Nil(t, e.OrigErr())

    assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "", creds.SessionToken, "Expect session token to match")
}

func TestEC2RoleProviderIsExpired(t *testing.T) {
    server := initTestServer("2014-12-16T01:51:37Z", false)
    defer server.Close()

    p := &ec2rolecreds.EC2RoleProvider{
        Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
    }
    p.CurrentTime = func() time.Time {
        return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
    }

    assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")

    _, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error, %v", err)

    assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")

    p.CurrentTime = func() time.Time {
        return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
    }

    assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}

func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
    server := initTestServer("2014-12-16T01:51:37Z", false)
    defer server.Close()

    p := &ec2rolecreds.EC2RoleProvider{
        Client:       ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
        ExpiryWindow: time.Hour * 1,
    }
    p.CurrentTime = func() time.Time {
        return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
    }

    assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")

    _, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error, %v", err)

    assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")

    p.CurrentTime = func() time.Time {
        return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
    }

    assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}

func BenchmarkEC3RoleProvider(b *testing.B) {
    server := initTestServer("2014-12-16T01:51:37Z", false)
    defer server.Close()

    p := &ec2rolecreds.EC2RoleProvider{
        Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
    }
    _, err := p.Retrieve()
    if err != nil {
        b.Fatal(err)
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if _, err := p.Retrieve(); err != nil {
            b.Fatal(err)
        }
    }
}

@@ -1,70 +0,0 @@
package credentials

import (
    "github.com/stretchr/testify/assert"
    "os"
    "testing"
)

func TestEnvProviderRetrieve(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_ACCESS_KEY_ID", "access")
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
    os.Setenv("AWS_SESSION_TOKEN", "token")

    e := EnvProvider{}
    creds, err := e.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}

func TestEnvProviderIsExpired(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_ACCESS_KEY_ID", "access")
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
    os.Setenv("AWS_SESSION_TOKEN", "token")

    e := EnvProvider{}

    assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")

    _, err := e.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
}

func TestEnvProviderNoAccessKeyID(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")

    e := EnvProvider{}
    creds, err := e.Retrieve()
    assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
}

func TestEnvProviderNoSecretAccessKey(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_ACCESS_KEY_ID", "access")

    e := EnvProvider{}
    creds, err := e.Retrieve()
    assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
}

func TestEnvProviderAlternateNames(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_ACCESS_KEY", "access")
    os.Setenv("AWS_SECRET_KEY", "secret")

    e := EnvProvider{}
    creds, err := e.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
    assert.Empty(t, creds.SessionToken, "Expected no token")
}

116 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go generated vendored
@@ -1,116 +0,0 @@
package credentials

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestSharedCredentialsProvider(t *testing.T) {
    os.Clearenv()

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}

func TestSharedCredentialsProviderIsExpired(t *testing.T) {
    os.Clearenv()

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}

    assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")

    _, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
}

func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini")
    p := SharedCredentialsProvider{}
    creds, err := p.Retrieve()

    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}

func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) {
    os.Clearenv()
    wd, err := os.Getwd()
    assert.NoError(t, err)
    os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini"))
    p := SharedCredentialsProvider{}
    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}

func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
    os.Clearenv()
    os.Setenv("AWS_PROFILE", "no_token")

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect no token")
}

func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
    os.Clearenv()

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect no token")
}

func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
    os.Clearenv()

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
    creds, err := p.Retrieve()
    assert.Nil(t, err, "Expect no error")

    assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect no token")
}

func BenchmarkSharedCredentialsProvider(b *testing.B) {
    os.Clearenv()

    p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
    _, err := p.Retrieve()
    if err != nil {
        b.Fatal(err)
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, err := p.Retrieve()
        if err != nil {
            b.Fatal(err)
        }
    }
}

@@ -1,34 +0,0 @@
package credentials

import (
    "github.com/stretchr/testify/assert"
    "testing"
)

func TestStaticProviderGet(t *testing.T) {
    s := StaticProvider{
        Value: Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            SessionToken:    "",
        },
    }

    creds, err := s.Retrieve()
    assert.Nil(t, err, "Expect no error")
    assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
    assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
    assert.Empty(t, creds.SessionToken, "Expect no session token")
}

func TestStaticProviderIsExpired(t *testing.T) {
    s := StaticProvider{
        Value: Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            SessionToken:    "",
        },
    }

    assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
}

Some files were not shown because too many files have changed in this diff.